[ { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex c58cfeb50..679e57ff2 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -666,7 +666,7 @@ def load_checkpoint_in_model(\n elif len(potential_index) == 1:\n index_filename = os.path.join(checkpoint, potential_index[0])\n else:\n- raise ValueError(f\"{checkpoint} containing mote than one `.index.json` file, delete the irrelevant ones.\")\n+ raise ValueError(f\"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones.\")\n else:\n raise ValueError(\n \"`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded \"\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/955", "pr_id": 1188526483 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex d3247a464..75a2c06f6 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -404,6 +404,7 @@ def load_checkpoint_and_dispatch(\n offload_folder=offload_folder,\n dtype=dtype,\n offload_state_dict=offload_state_dict,\n+ offload_buffers=offload_buffers,\n )\n if device_map is None:\n return model\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 2774b18ce..c58cfeb50 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -607,6 +607,7 @@ def load_checkpoint_in_model(\n offload_folder: Optional[Union[str, os.PathLike]] = None,\n dtype: Optional[Union[str, torch.dtype]] = None,\n offload_state_dict: bool = False,\n+ offload_buffers: bool = False,\n ):\n \"\"\"\n Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n@@ -636,6 +637,8 @@ def load_checkpoint_in_model(\n offload_state_dict (`bool`, *optional*, defaults to `False`):\n If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if\n the weight of the CPU state dict + the biggest shard does not fit.\n+ offload_buffers (`bool`, *optional*, defaults to `False):\n+ Whether or not to include the buffers in the weights offloaded to disk.\n \"\"\"\n if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n raise ValueError(\n@@ -687,6 +690,8 @@ def load_checkpoint_in_model(\n state_dict_folder = tempfile.mkdtemp()\n state_dict_index = {}\n \n+ buffer_names = [name for name, _ in model.named_buffers()]\n+\n for checkpoint_file in checkpoint_files:\n checkpoint = torch.load(checkpoint_file)\n if device_map is None:\n@@ -703,7 +708,8 @@ def load_checkpoint_in_model(\n param_device = device_map[module_name]\n \n if param_device == \"disk\":\n- set_module_tensor_to_device(model, param_name, \"meta\")\n+ if offload_buffers or param_name not in buffer_names:\n+ set_module_tensor_to_device(model, param_name, \"meta\")\n offload_weight(param, param_name, offload_folder, index=offload_index)\n elif param_device == \"cpu\" and offload_state_dict:\n set_module_tensor_to_device(model, param_name, \"meta\")\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\nindex 16243d5e1..644d297b2 100644\n--- a/tests/test_modeling_utils.py\n+++ b/tests/test_modeling_utils.py\n@@ -275,6 +275,31 @@ def test_load_checkpoint_in_model_one_gpu(self):\n 
self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n \n+ @require_cuda\n+ def test_load_checkpoint_in_model_disk_offload(self):\n+ device_map = {\"linear1\": \"cpu\", \"batchnorm\": \"disk\", \"linear2\": \"cpu\"}\n+\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), fname)\n+ load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ # Buffers are not offloaded by default\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), fname)\n+ load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir, offload_buffers=True)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"meta\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n @require_multi_gpu\n def test_load_checkpoint_in_model_two_gpu(self):\n device_map = {\"linear1\": 0, \"batchnorm\": \"cpu\", \"linear2\": 1}\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/951", "pr_id": 1183897941 }, { "diff": "diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex fceb0bd23..03a4e346c 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -243,10 +243,7 @@ def test_deepspeed_plugin(self, stage):\n \n @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)\n def test_accelerate_state_deepspeed(self, dtype):\n- state = AcceleratorState(_from_accelerator=True)\n- if state.initialized:\n- state.initialized = False\n-\n+ AcceleratorState._reset_state()\n deepspeed_plugin = DeepSpeedPlugin(\n gradient_accumulation_steps=1,\n gradient_clipping=1.0,\n@@ -259,7 +256,6 @@ def test_accelerate_state_deepspeed(self, dtype):\n with mockenv_context(**self.dist_env):\n state = Accelerator(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin).state\n self.assertTrue(state.deepspeed_plugin.deepspeed_config[dtype][\"enabled\"])\n- state.initialized = False\n \n def test_init_zero3(self):\n deepspeed_plugin = DeepSpeedPlugin(\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/950", "pr_id": 1183653396 }, { "diff": "diff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 31a6d64a2..d97327de4 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -35,6 +35,14 @@\n import torch_xla.core.xla_model as xm\n \n \n+def is_initialized() -> bool:\n+ \"\"\"\n+ Checks if the `AcceleratorState` has been initialized from `Accelerator`. 
Same as `AcceleratorState.initialized`,\n+ but works as a module method.\n+ \"\"\"\n+ return AcceleratorState._shared_state != {}\n+\n+\n # Inspired by Alex Martelli's 'Borg'.\n class AcceleratorState:\n \"\"\"\n@@ -45,6 +53,7 @@ class AcceleratorState:\n - **device** (`torch.device`) -- The device to use.\n - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently\n in use.\n+ - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.\n - **local_process_index** (`int`) -- The index of the current process on the current server.\n - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type\n of mixed precision being performed.\n@@ -69,8 +78,7 @@ def __init__(\n if parse_flag_from_env(\"ACCELERATE_USE_CPU\"):\n cpu = True\n self._check_initialized(mixed_precision, cpu)\n- self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\n- if not getattr(self, \"initialized\", False):\n+ if not self.initialized:\n self.backend = None\n self.deepspeed_plugin = None\n mixed_precision = (\n@@ -245,18 +253,17 @@ def __init__(\n and self.device.type == \"cuda\"\n ):\n torch.backends.cuda.matmul.allow_tf32 = True\n- self.initialized = True\n \n- def __repr__(self):\n- mixed_precision = self.mixed_precision\n+ self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\n \n+ def __repr__(self):\n repr = (\n f\"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\\n\"\n f\"Num processes: {self.num_processes}\\n\"\n f\"Process index: {self.process_index}\\n\"\n f\"Local process index: {self.local_process_index}\\n\"\n f\"Device: {self.device}\\n\"\n- f\"Mixed precision type: {mixed_precision}\\n\"\n+ f\"Mixed precision type: {self.mixed_precision}\\n\"\n )\n if self.distributed_type == DistributedType.DEEPSPEED:\n repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\n\"\n@@ -286,9 +293,14 @@ def _reset_state():\n \"Resets `_shared_state`, is used internally and should not be called\"\n AcceleratorState._shared_state = {}\n \n+ @property\n+ def initialized(self) -> bool:\n+ \"Returns whether the `AcceleratorState` has been initialized\"\n+ return self._shared_state != {}\n+\n def _check_initialized(self, mixed_precision=None, cpu=None):\n \"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized\"\n- if getattr(self, \"initialized\", False):\n+ if self.initialized:\n err = \"AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerate()`.\"\n if cpu and self.device.type != \"cpu\":\n raise ValueError(err.format(flag=\"cpu=True\"))\n@@ -311,11 +323,15 @@ class GradientState:\n \n def __init__(self):\n self.__dict__ = self._shared_state\n- if not getattr(self, \"initialized\", False):\n+ if not self.initialized:\n self.sync_gradients = True\n self.end_of_dataloader = False\n self.remainder = -1\n- self.initialized = True\n+\n+ @property\n+ def initialized(self) -> bool:\n+ \"Returns whether the `GradientState` has been initialized\"\n+ return GradientState._shared_state != {}\n \n def __repr__(self):\n return (\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/949", "pr_id": 1183552649 }, { "diff": "diff 
--git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nindex 22f51cce1..84c115fd3 100644\n--- a/src/accelerate/utils/offload.py\n+++ b/src/accelerate/utils/offload.py\n@@ -34,7 +34,7 @@ def offload_weight(weight, weight_name, offload_folder, index=None):\n # Need to reinterpret the underlined data as int16 since NumPy does not handle bfloat16s.\n weight = weight.view(torch.int16)\n dtype = \"bfloat16\"\n- array = weight.numpy()\n+ array = weight.cpu().numpy()\n tensor_file = os.path.join(offload_folder, f\"{weight_name}.dat\")\n if index is not None:\n if dtype is None:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/945", "pr_id": 1178312394 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex ec5041bfa..7c07e9105 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -363,7 +363,7 @@ def __init__(\n if (\n self.state.mixed_precision == \"fp16\"\n and self.device.type != \"cpu\"\n- and self.distributed_type != DistributedType.MEGATRON_LM\n+ and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)\n ):\n self.native_amp = True\n if not torch.cuda.is_available() and not parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\"):\n@@ -375,10 +375,10 @@ def __init__(\n self.scaler = ShardedGradScaler(**kwargs)\n else:\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n- elif (\n- self.state.mixed_precision == \"bf16\"\n- and self.distributed_type != DistributedType.FSDP\n- and self.distributed_type != DistributedType.MEGATRON_LM\n+ elif self.state.mixed_precision == \"bf16\" and self.distributed_type not in (\n+ DistributedType.DEEPSPEED,\n+ DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,\n ):\n if self.device.type == \"cpu\":\n self.native_amp = is_torch_version(\">=\", \"1.10\")\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/943", "pr_id": 1176583667 }, { "diff": "diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 7afebea48..379d6454e 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -37,7 +37,10 @@\n _available_trackers = []\n \n if is_tensorboard_available():\n- from torch.utils import tensorboard\n+ try:\n+ from torch.utils import tensorboard\n+ except ModuleNotFoundError:\n+ import tensorboardX as tensorboard\n \n _available_trackers.append(LoggerType.TENSORBOARD)\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/942", "pr_id": 1176562235 }, { "diff": "diff --git a/docs/source/usage_guides/deepspeed.mdx b/docs/source/usage_guides/deepspeed.mdx\nindex 29561c77b..0377296c1 100644\n--- a/docs/source/usage_guides/deepspeed.mdx\n+++ b/docs/source/usage_guides/deepspeed.mdx\n@@ -395,6 +395,196 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. 
\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a sample script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. Content of the `accelerate` config:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------\n+In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------\n+Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. Content of the `accelerate` config:\n+\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: ds_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 4\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: bf16\n+ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\n+```\n+\n+**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `\"auto\"` in the DeepSpeed` configuration file and check that things work as expected.\n+\n+1. New `ds_config.json` with `\"auto\"` for the `accelerate launch` DeepSpeed command arguments:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": \"auto\"\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": \"auto\",\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\",\n+ \"offload_optimizer\": {\n+ \"device\": \"auto\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"auto\"\n+ }\n+ },\n+ \"gradient_clipping\": \"auto\",\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": \"auto\",\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+2. Output of `accelerate launch --mixed_precision=\"fp16\" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device=\"cpu\" --offload_optimizer_device=\"nvme\" --zero3_save_16bit_model=\"true\" test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: fp16\n+ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}\n+```\n+\n+**Note**: Remaining `\"auto\"` values are handled in `accelerator.prepare()` call as explained in point 2 of \n+`Important code changes when using DeepSpeed Config File`.\n+\n ## Saving and loading\n \n 1. 
Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex a3dcd2dcb..50acf55a1 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -460,7 +460,7 @@ def get_cluster_input():\n \n if distributed_type != DistributedType.TPU:\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\n- mixed_precision = \"no\"\n+ mixed_precision = None\n else:\n mixed_precision = _ask_options(\n \"Do you wish to use FP16 or BF16 (mixed precision)?\",\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex ba492802e..8b4a28292 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -78,6 +78,7 @@ def to_dict(self):\n for key, value in result.items():\n if isinstance(value, Enum):\n result[key] = value.value\n+ result = {k: v for k, v in result.items() if v is not None}\n return result\n \n @classmethod\n@@ -88,7 +89,7 @@ def from_json_file(cls, json_file=None):\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n if \"mixed_precision\" not in config_dict:\n- config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n+ config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else None\n if \"fp16\" in config_dict: # Convert the config to the new format.\n del config_dict[\"fp16\"]\n if \"use_cpu\" not in config_dict:\n@@ -111,7 +112,7 @@ def from_yaml_file(cls, yaml_file=None):\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n \n if \"mixed_precision\" not in config_dict:\n- config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n+ config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else None\n if \"fp16\" in config_dict: # Convert the config to the new format.\n del config_dict[\"fp16\"]\n if \"use_cpu\" not in config_dict:\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 91d4427ac..b5f831b47 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to 'none'.\",\n )\n deepspeed_args.add_argument(\n \"--offload_param_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). 
\"\n+ \"If unspecified, will default to 'none'.\",\n )\n deepspeed_args.add_argument(\n \"--gradient_accumulation_steps\",\n default=None,\n type=int,\n- help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `1`.\",\n )\n deepspeed_args.add_argument(\n \"--gradient_clipping\",\n default=None,\n type=float,\n- help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `1.0`.\",\n )\n deepspeed_args.add_argument(\n \"--zero3_init_flag\",\n- default=\"true\",\n+ default=None,\n type=str,\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\n- \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n+ \"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.\",\n )\n deepspeed_args.add_argument(\n \"--zero3_save_16bit_model\",\n- default=\"false\",\n+ default=None,\n type=str,\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. \"\n- \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n+ \"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.\",\n )\n deepspeed_args.add_argument(\n \"--deepspeed_hostfile\",\n@@ -363,7 +368,7 @@ def launch_command_parser(subparsers=None):\n \"--deepspeed_multinode_launcher\",\n default=None,\n type=str,\n- help=\"DeepSpeed multi-node launcher to use.\",\n+ help=\"DeepSpeed multi-node launcher to use. 
If unspecified, will default to `pdsh`.\",\n )\n \n # fsdp arguments\n@@ -717,14 +722,22 @@ def deepspeed_launcher(args):\n \n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n+ current_env[\"ACCELERATE_CONFIG_DS_FIELDS\"] = str(args.deepspeed_fields_from_accelerate_config).lower()\n current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n- current_env[\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n- current_env[\"ACCELERATE_GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\n- current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n- current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\n- current_env[\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n+ if args.zero_stage is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n+ if args.gradient_accumulation_steps is not None:\n+ current_env[\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n+ if args.gradient_clipping is not None:\n+ current_env[\"ACCELERATE_GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\n+ if args.offload_optimizer_device is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n+ if args.offload_param_device is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n+ if args.zero3_init_flag is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\n+ if args.zero3_save_16bit_model is not None:\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n if args.deepspeed_config_file is not None:\n current_env[\"ACCELERATE_DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\n \n@@ -966,6 +979,7 @@ def launch_command(args):\n \n defaults = None\n warned = []\n+ mp_from_config_flag = False\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n@@ -1013,7 +1027,12 @@ def launch_command(args):\n ):\n setattr(args, name, attr)\n if not args.mixed_precision:\n- args.mixed_precision = defaults.mixed_precision\n+ if defaults.mixed_precision is None:\n+ args.mixed_precision = \"no\"\n+ else:\n+ args.mixed_precision = defaults.mixed_precision\n+ mp_from_config_flag = True\n+\n if args.dynamo_backend is None:\n warned.append(\"\\t`--dynamo_backend` was set to a value of `'no'`\")\n args.dynamo_backend = \"no\"\n@@ -1056,6 +1075,10 @@ def launch_command(args):\n \n # Use the proper launcher\n if args.use_deepspeed and not args.cpu:\n+ args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []\n+ if mp_from_config_flag:\n+ args.deepspeed_fields_from_accelerate_config.append(\"mixed_precision\")\n+ args.deepspeed_fields_from_accelerate_config = \",\".join(args.deepspeed_fields_from_accelerate_config)\n deepspeed_launcher(args)\n elif args.use_fsdp and not args.cpu:\n multi_gpu_launcher(args)\ndiff --git 
a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 5a87d7860..01f174a10 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -394,6 +394,28 @@ class DeepSpeedPlugin:\n def __post_init__(self):\n from .deepspeed import HfDeepSpeedConfig\n \n+ if self.gradient_accumulation_steps is None:\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\n+\n+ if self.gradient_clipping is None:\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\n+ if gradient_clipping != \"none\":\n+ self.gradient_clipping = float(gradient_clipping)\n+\n+ if self.zero_stage is None:\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\n+\n+ if self.offload_optimizer_device is None:\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+\n+ if self.offload_param_device is None:\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n+\n+ if self.zero3_save_16bit_model is None:\n+ self.zero3_save_16bit_model = (\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ )\n+\n if self.hf_ds_config is None:\n self.hf_ds_config = os.environ.get(\"ACCELERATE_DEEPSPEED_CONFIG_FILE\", \"none\")\n if (\n@@ -405,33 +427,22 @@ def __post_init__(self):\n self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)\n if \"gradient_accumulation_steps\" not in self.hf_ds_config.config:\n self.hf_ds_config.config[\"gradient_accumulation_steps\"] = 1\n- elif self.hf_ds_config.config[\"gradient_accumulation_steps\"] == \"auto\":\n- raise ValueError(\"gradient_accumulation_steps cannot be set to 'auto' in the DeepSpeed config.\")\n if \"zero_optimization\" not in self.hf_ds_config.config:\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\n- else:\n- if self.gradient_accumulation_steps is None:\n- self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\n-\n- if self.gradient_clipping is None:\n- gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\n- if gradient_clipping != \"none\":\n- self.gradient_clipping = float(gradient_clipping)\n-\n- if self.zero_stage is None:\n- self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\n-\n- if self.offload_optimizer_device is None:\n- self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n-\n- if self.offload_param_device is None:\n- self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n-\n- if self.zero3_save_16bit_model is None:\n- self.zero3_save_16bit_model = (\n- os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n- )\n \n+ self._deepspeed_config_checks()\n+ kwargs = {\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"gradient_clipping\": self.gradient_clipping if self.gradient_clipping else 1.0,\n+ \"zero_optimization.stage\": self.zero_stage,\n+ \"zero_optimization.offload_optimizer.device\": self.offload_optimizer_device,\n+ \"zero_optimization.offload_param.device\": self.offload_param_device,\n+ \"zero_optimization.stage3_gather_16bit_weights_on_model_save\": self.zero3_save_16bit_model,\n+ }\n+ for key in kwargs.keys():\n+ self.fill_match(key, **kwargs, must_match=False)\n+ 
self.hf_ds_config.set_stage_and_offload()\n+ else:\n config = {\n \"train_batch_size\": \"auto\",\n \"train_micro_batch_size_per_gpu\": \"auto\",\n@@ -450,15 +461,19 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = (\n+ strtobool(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", str(self.hf_ds_config.is_zero3()))) == 1\n+ )\n if self.zero3_init_flag and not self.hf_ds_config.is_zero3():\n warnings.warn(\"DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\")\n self.zero3_init_flag = False\n \n- def fill_match(self, ds_key_long, mismatches, must_match=True, **kwargs):\n+ def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):\n+ mismatches = [] if mismatches is None else mismatches\n config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)\n if config is None:\n return\n@@ -503,10 +518,28 @@ def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must\n \n def set_mixed_precision(self, mixed_precision):\n ds_config = self.deepspeed_config\n- if mixed_precision == \"fp16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\n- ds_config.update({\"fp16\": {\"enabled\": True, \"auto_cast\": True}})\n- elif mixed_precision == \"bf16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\n- ds_config.update({\"bf16\": {\"enabled\": True}})\n+ kwargs = {\n+ \"fp16.enabled\": mixed_precision == \"fp16\",\n+ \"bf16.enabled\": mixed_precision == \"bf16\",\n+ }\n+ if mixed_precision == \"fp16\":\n+ if \"fp16\" not in ds_config:\n+ ds_config[\"fp16\"] = {\"enabled\": True, \"auto_cast\": True}\n+ elif mixed_precision == \"bf16\":\n+ if \"bf16\" not in ds_config:\n+ ds_config[\"bf16\"] = {\"enabled\": True}\n+\n+ if mixed_precision != \"no\":\n+ diff_dtype = \"bf16\" if mixed_precision == \"fp16\" else \"fp16\"\n+ if str(ds_config.get(diff_dtype, {}).get(\"enabled\", \"False\")).lower() == \"true\":\n+ raise ValueError(\n+ f\"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file.\"\n+ )\n+ for dtype in [\"fp16\", \"bf16\"]:\n+ if dtype not in ds_config:\n+ ds_config[dtype] = {\"enabled\": False}\n+ self.fill_match(\"fp16.enabled\", must_match=False, **kwargs)\n+ self.fill_match(\"bf16.enabled\", must_match=False, **kwargs)\n \n def set_deepspeed_weakref(self):\n from .imports import is_transformers_available\n@@ -549,6 +582,31 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower() for name in env_variable_names_to_ignore\n+ ]\n+\n+ 
deepspeed_fields_from_accelerate_config = os.environ.get(\"ACCELERATE_CONFIG_DS_FIELDS\", \"\").split(\",\")\n+\n+ if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\\n\"\n+ \"The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n\"\n+ \"It will only ask for the necessary config variables when using `deepspeed_config_file`.\"\n+ )\n+\n \n @dataclass\n class FullyShardedDataParallelPlugin:\ndiff --git a/src/accelerate/utils/deepspeed.py b/src/accelerate/utils/deepspeed.py\nindex 02d1ab8bc..69dc5c7f8 100644\n--- a/src/accelerate/utils/deepspeed.py\n+++ b/src/accelerate/utils/deepspeed.py\n@@ -50,6 +50,9 @@ def __init__(self, config_file_or_dict):\n raise ValueError(\"expecting either a path to a DeepSpeed config file or a pre-populated dict\")\n self.config = config\n \n+ self.set_stage_and_offload()\n+\n+ def set_stage_and_offload(self):\n # zero stage - this is done as early as possible, before model is created, to allow\n # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object\n # during ``zero.Init()`` which needs to know the dtype, and some other hparams.\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex eca75c1ed..fceb0bd23 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -285,8 +285,6 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n from deepspeed.runtime.engine import DeepSpeedEngine\n \n kwargs = {\n- \"fp16.enabled\": True,\n- \"bf16.enabled\": False,\n \"optimizer.params.lr\": 5e-5,\n \"optimizer.params.weight_decay\": 0.0,\n \"scheduler.params.warmup_min_lr\": 0.0,\n@@ -370,7 +368,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n # Test DeepSpeed optimizer + DeepSpeed scheduler\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\n train_set = RegressionDataset(length=80)\n eval_set = RegressionDataset(length=20)\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n@@ -430,7 +428,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n # Test custom optimizer + DeepSpeed scheduler\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\n train_set = RegressionDataset(length=80)\n eval_set = RegressionDataset(length=20)\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n@@ -463,7 +461,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n # Test deepspeed optimizer + custom scheduler\n deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = 
Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\n train_set = RegressionDataset(length=80)\n eval_set = RegressionDataset(length=20)\n train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n@@ -501,8 +499,6 @@ def test_save_checkpoints(self):\n )\n del deepspeed_plugin.deepspeed_config[\"bf16\"]\n kwargs = {\n- \"fp16.enabled\": True,\n- \"bf16.enabled\": False,\n \"optimizer.params.lr\": 5e-5,\n \"optimizer.params.weight_decay\": 0.0,\n \"scheduler.params.warmup_min_lr\": 0.0,\n@@ -518,7 +514,7 @@ def test_save_checkpoints(self):\n }\n \n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=\"fp16\")\n kwargs[\"train_batch_size\"] = (\n kwargs[\"train_micro_batch_size_per_gpu\"]\n * deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]\n@@ -594,6 +590,81 @@ def test_autofill_dsconfig(self):\n accelerator.deepspeed_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"]\n )\n \n+ @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)\n+ def test_autofill_dsconfig_from_ds_plugin(self, dtype):\n+ ds_config = self.ds_config_dict[\"zero3\"]\n+ if dtype == BF16:\n+ del ds_config[\"fp16\"]\n+ else:\n+ del ds_config[\"bf16\"]\n+ ds_config[dtype][\"enabled\"] = \"auto\"\n+ ds_config[\"zero_optimization\"][\"stage\"] = \"auto\"\n+ ds_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"] = \"auto\"\n+ ds_config[\"zero_optimization\"][\"offload_optimizer\"][\"device\"] = \"auto\"\n+ ds_config[\"zero_optimization\"][\"offload_param\"][\"device\"] = \"auto\"\n+ ds_config[\"gradient_accumulation_steps\"] = \"auto\"\n+ ds_config[\"gradient_clipping\"] = \"auto\"\n+\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ hf_ds_config=ds_config,\n+ zero3_init_flag=True,\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=2,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=True,\n+ )\n+\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)\n+ deepspeed_plugin = accelerator.state.deepspeed_plugin\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"gradient_clipping\"], 1.0)\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"], 1)\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"stage\"], 2)\n+ self.assertEqual(\n+ deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"offload_optimizer\"][\"device\"], \"cpu\"\n+ )\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"offload_param\"][\"device\"], \"cpu\")\n+ self.assertTrue(\n+ deepspeed_plugin.deepspeed_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"]\n+ )\n+ self.assertTrue(deepspeed_plugin.deepspeed_config[dtype][\"enabled\"])\n+\n+ AcceleratorState._reset_state()\n+ diff_dtype = \"bf16\" if dtype == \"fp16\" else \"fp16\"\n+ with mockenv_context(**self.dist_env):\n+ with self.assertRaises(ValueError) as cm:\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=diff_dtype)\n+ self.assertTrue(\n+ f\"`--mixed_precision` arg cannot be set to `{diff_dtype}` when `{dtype}` is set in the DeepSpeed config file.\"\n+ in str(cm.exception)\n+ )\n+\n+ def test_ds_config_assertions(self):\n+ ambiguous_env = self.dist_env.copy()\n+ 
ambiguous_env[\n+ \"ACCELERATE_CONFIG_DS_FIELDS\"\n+ ] = \"gradient_accumulation_steps,gradient_clipping,zero_stage,offload_optimizer_device,offload_param_device,zero3_save_16bit_model,mixed_precision\"\n+\n+ with mockenv_context(**ambiguous_env):\n+ with self.assertRaises(ValueError) as cm:\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ hf_ds_config=self.ds_config_file[ZERO3],\n+ zero3_init_flag=True,\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=ZERO2,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=True,\n+ )\n+ _ = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=FP16)\n+ self.assertTrue(\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\"\n+ in str(cm.exception)\n+ )\n+\n def test_basic_run(self):\n mod_file = inspect.getfile(accelerate.test_utils)\n test_file_path = os.path.sep.join(\n", "code_comments": [ { "body": "Nit: all `none`->`None`", "diff_hunk": "@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `none`.\",", "from_author": false }, { "body": "```suggestion\r\n \"If you are using an accelerate config file, set `mixed_precision=no` \"\r\n```", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using accelerate config file, set `mixed_precision=no` \"", "from_author": false }, { "body": "```suggestion\r\n \"and make sure to not specify these config variables in `accelerate launch` command. 
\\n\"\r\n```", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using accelerate config file, set `mixed_precision=no` \"\n+ \"and remove others config variables mentioned in the above specified list; \"\n+ \"else don't specify these config variables in `accelerate launch` command. \\n\"", "from_author": false }, { "body": "Hello, `none` is the string option as possible values are `cpu`|`nvme`l`none`", "diff_hunk": "@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `none`.\",", "from_author": true }, { "body": "Oh then add quotes?", "diff_hunk": "@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). 
\"\n+ \"If unspecified, will default to `none`.\",", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -301,45 +301,50 @@ def launch_command_parser(subparsers=None):\n \"--zero_stage\",\n default=None,\n type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n+ \"If unspecified, will default to `none`.\",", "from_author": true }, { "body": "Did I understand it right that here you suggest to the user to set `mixed_precision=no` even if they use mixed precision in ds_config.json? If so this now is worse than before, as now you're proposing to force 2 opposite values in the same config entry. As in `mixed_precision=no` and `fp16 { enabled: true}`.\r\n\r\nUnless I'm misunderstanding the wording that is. \r\n\r\nI think it should be either `mixed_precision=xyz` or `ds_config.json` (with fp16 or bf16 blocks or no block at all as fp16 is the default), but not both.", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": false }, { "body": "```suggestion\r\n \"The easiest method is to create a new config following the questionnaire via `accelerate config`.\\n\"\r\n```", "diff_hunk": "@@ -578,10 +578,9 @@ def _deepspeed_config_checks(self):\n raise ValueError(\n f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n- \"If you are using an accelerate config file, set `mixed_precision=no` \"\n- \"and remove others config variables mentioned in the above specified list; \"\n+ \"If you are using an accelerate config file, remove others config variables mentioned in the 
above specified list; \"\n \"and make sure to not specify these config variables in `accelerate launch` command. \\n\"\n- \"The easiest method is to create new config following the questionnaire via `accelerate config`.\\n\"\n+ \"The easiest method is to create new config following the questionnaire via `accelerate config`.\\n\"", "from_author": false }, { "body": "Done. I've simplified the error message wherein user can just remove all the ambiguous entries from accelerate config file and not specify them in launch command", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": true }, { "body": "to focus we are dealing with `mixed_precision` duplicity here:\r\n\r\nI've just tried:\r\n\r\n```\r\ndeepspeed_config:\r\n deepspeed_multinode_launcher: standard\r\n deepspeed_config_file: ./configs/vopt-large-z3/ds_config.json\r\n zero3_init_flag: true\r\ndistributed_type: DEEPSPEED\r\nmixed_precision: 'no'\r\n```\r\n\r\n```\r\n{\r\n \"fp16\": {\r\n \"enabled\": true,\r\n [...]\r\n```\r\nand it doesn't assert.\r\n\r\nwith the latest commit of `dd2d57b6a1`\r\n\r\n", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ 
f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": false }, { "body": "(incidentally `mixed_precision: fp16|bf16|'no'` is odd - why do different values don't follow the same style - i.e 'no' in quotes but not the other values)", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": false }, { "body": "Hello @stas, this case can't be helped as 'no' is the default value and if one doesn't specify the entry in accelerate config file, it will get default value of 'no' and thereby we have no way to check if user has given the default in config file or was it set to default when reading the config file. It would mean a lot of code rewriting as `mixed_precision` is used in various other non DeepSpeed parts.\n\nThe default value of `mixed_precision` would be overriden by that in `deepspeed_config_file`. Now, if you answer the questionnaire via `accelerate config`, the entry of `mixed_precision` won't be there when using `deepspeed_config_file`. \n\n\n@sgugger, what are your thoughts around this. 
", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": true }, { "body": "understood! in which case I think this should work:\r\n\r\n1. if the `mixed_precision` config isn't in the accelerate config file all is good\r\n2. if the `mixed_precision` config is in the accelerate config file and ds_config file is used, the value of the former must match the value of the latter or assert.", "diff_hunk": "@@ -549,6 +551,40 @@ def zero3_init_context_manager(self, enable=False):\n self.dschf = None\n self.set_deepspeed_weakref()\n \n+ def _deepspeed_config_checks(self):\n+ env_variable_names_to_ignore = [\n+ \"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\",\n+ \"ACCELERATE_GRADIENT_CLIPPING\",\n+ \"ACCELERATE_DEEPSPEED_ZERO_STAGE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\",\n+ \"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\",\n+ \"ACCELERATE_MIXED_PRECISION\",\n+ ]\n+ duplicate_values_flag = False\n+ for name in env_variable_names_to_ignore:\n+ if name != \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, None) is not None:\n+ duplicate_values_flag = True\n+ elif name == \"ACCELERATE_MIXED_PRECISION\" and os.environ.get(name, \"no\") != \"no\":\n+ duplicate_values_flag = True\n+ if duplicate_values_flag:\n+ break\n+\n+ if duplicate_values_flag:\n+ env_variable_names_to_ignore = [\n+ name.replace(\"ACCELERATE_\", \"\").replace(\"DEEPSPEED_\", \"\").lower()\n+ for name in env_variable_names_to_ignore\n+ ]\n+ raise ValueError(\n+ f\"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\\n\"\n+ \"Please specify them appropriately in the DeepSpeed config file.\\n\"\n+ \"If you are using an accelerate config file, set `mixed_precision=no` \"", "from_author": false }, { "body": "```suggestion\r\nBelow is a sample script using `deepspeed_config_file` in different scenarios.\r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. 
\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.", "from_author": false }, { "body": "Are the lines of `-` intended? Should there be a new line before the text?", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. 
Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? ", "from_author": false }, { "body": "```suggestion\r\n1. Content of the `accelerate` config:\r\n```\r\n(to not mistake this with the command `accelerate config`", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:", "from_author": false }, { "body": "```suggestion\r\n2. Content of the `accelerate` config:\r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. 
`ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. `accelerate config`:", "from_author": false }, { "body": "```suggestion\r\n**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `\"auto\"` in the DeepSpeed` configuration file and check that things work as expected.\r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. 
`accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. 
`accelerate config`:\n+\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: ds_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 4\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: bf16\n+ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\n+```\n+\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.", "from_author": false }, { "body": "```suggestion\r\n1. New `ds_config.json` with `\"auto\"` for the `accelerate launch` DeepSpeed command arguments:\r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. 
Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. `accelerate config`:\n+\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: ds_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 4\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: bf16\n+ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\n+```\n+\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.\n+\n+1. New `ds_config.json` with `auto` for the `accelerate launch` deepspeed cmd args:", "from_author": false }, { "body": "```suggestion\r\n**Note**: Remaining `\"auto\"` values are handled in `accelerator.prepare()` call as explained in point 2 of \r\n```", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. 
\n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. `ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \n+multi-GPU \n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you wish to optimize your script with torch dynamo?[yes/NO]: \n+Do you want to use DeepSpeed? [yes/NO]: yes \n+Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: yes \n+Please enter the path to the json DeepSpeed config file: ds_config.json \n+Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\n+How many GPU(s) should be used for distributed training? [1]:4\n+accelerate configuration saved at ds_config_sample.yaml\n+```\n+\n+2. `accelerate config`:\n+\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: ds_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 4\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: bf16\n+ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\n+```\n+\n+**Scenario 3**: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.\n+\n+1. New `ds_config.json` with `auto` for the `accelerate launch` deepspeed cmd args:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": \"auto\"\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": \"auto\",\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\",\n+ \"offload_optimizer\": {\n+ \"device\": \"auto\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"auto\"\n+ }\n+ },\n+ \"gradient_clipping\": \"auto\",\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": \"auto\",\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+2. 
Output of `accelerate launch --mixed_precision=\"fp16\" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device=\"cpu\" --offload_optimizer_device=\"nvme\" --zero3_save_16bit_model=\"true\" test.py`:\n+\n+```bash\n+Distributed environment: DEEPSPEED Backend: nccl\n+Num processes: 4\n+Process index: 0\n+Local process index: 0\n+Device: cuda:0\n+Mixed precision type: fp16\n+ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}\n+```\n+\n+**Note**: Remaining `auto` values are handled in `accelerator.prepare()` call as explained in point 2 of ", "from_author": false }, { "body": "Why is this set to True by default now?", "diff_hunk": "@@ -450,15 +461,17 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"", "from_author": false }, { "body": "these are the exact outputs from the new config #830. Added new lines. ", "diff_hunk": "@@ -395,6 +395,194 @@ We will look at the changes needed in the code when using these.\n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user.\n \n+**Things to note when using DeepSpeed Config File**\n+\n+Below is a smaple script using `deepspeed_config_file` in different scenarios.\n+\n+Code `test.py`:\n+\n+```python\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ accelerator.print(f\"{AcceleratorState()}\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n+```\n+\n+**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\n+\n+1. `accelerate config`:\n+\n+```yaml\n+command_file: null\n+commands: null\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: 'cpu'\n+ offload_param_device: 'cpu'\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+ deepspeed_config_file: 'ds_config.json'\n+distributed_type: DEEPSPEED\n+downcast_bf16: 'no'\n+dynamo_backend: 'NO'\n+fsdp_config: {}\n+gpu_ids: null\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+num_machines: 1\n+num_processes: 2\n+rdzv_backend: static\n+same_network: true\n+tpu_name: null\n+tpu_zone: null\n+use_cpu: false\n+```\n+\n+2. 
`ds_config.json`:\n+\n+```json\n+{\n+ \"bf16\": {\n+ \"enabled\": true\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"stage3_gather_16bit_weights_on_model_save\": false,\n+ \"offload_optimizer\": {\n+ \"device\": \"none\"\n+ },\n+ \"offload_param\": {\n+ \"device\": \"none\"\n+ }\n+ },\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"gradient_accumulation_steps\": 10,\n+ \"steps_per_print\": 2000000\n+}\n+```\n+\n+3. Output of `accelerate launch test.py`:\n+\n+```bash\n+ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \n+['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \n+'zero3_save_16bit_model', 'mixed_precision'].\n+Please specify them appropriately in the DeepSpeed config file.\n+If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n+The easiest method is to create a new config following the questionnaire via `accelerate config`.\n+It will only ask for the necessary config variables when using `deepspeed_config_file`.\n+```\n+\n+**Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\n+\n+1. Run `accelerate config`:\n+\n+```bash\n+$ accelerate config\n+-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\n+This machine \n+-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? ", "from_author": true }, { "body": "Hello, as stas mentioned in Issue #922 \r\n\r\n> this flag should be True by default as zero stage 3 is for large models, it's very unlikely the user will be able to load those models w/o zero.Init", "diff_hunk": "@@ -450,15 +461,17 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"", "from_author": true }, { "body": "Is this only used if zero3 is enabled in the rest of the code? It should at least only default to True when Zero-3 is enabled otherwise.", "diff_hunk": "@@ -450,15 +461,17 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"", "from_author": false }, { "body": "Yes, only used when zero3 is enabled else will give a warning and gets set to False as mentioned in on of the above threads. 
Done, defaults to True only for stage 3.", "diff_hunk": "@@ -450,15 +461,17 @@ def __post_init__(self):\n if self.gradient_clipping:\n config[\"gradient_clipping\"] = self.gradient_clipping\n self.hf_ds_config = HfDeepSpeedConfig(config)\n+\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"true\") == \"true\"", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "I tried this branch, getting:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"m4/training/main.py\", line 47, in <module>\r\n accelerator = Accelerator(\r\n File \"/mnt/nvme0/code/huggingface/accelerate-master/src/accelerate/accelerator.py\", line 246, in __init__\r\n DeepSpeedPlugin() if os.environ.get(\"ACCELERATE_USE_DEEPSPEED\", \"false\") == \"true\" else None\r\n File \"<string>\", line 12, in __init__\r\n File \"/mnt/nvme0/code/huggingface/accelerate-master/src/accelerate/utils/dataclasses.py\", line 412, in __post_init__\r\n self._deepspeed_config_checks()\r\n File \"/mnt/nvme0/code/huggingface/accelerate-master/src/accelerate/utils/dataclasses.py\", line 560, in _deepspeed_config_checks\r\n if ds_gradient_accumulation_steps != int(accelerate_gradient_accumulation_steps):\r\nValueError: invalid literal for int() with base 10: 'None'\r\n```\r\n\r\nconfigs:\r\n\r\n```\r\n{\r\n \"fp16\": {\r\n \"enabled\": true,\r\n \"auto_cast\": true,\r\n \"loss_scale\": 0.0,\r\n \"initial_scale_power\": 10,\r\n \"loss_scale_window\": 1000,\r\n \"hysteresis\": 2,\r\n \"min_loss_scale\": 1\r\n },\r\n \"zero_optimization\": {\r\n \"stage\": 3,\r\n \"allgather_partitions\": true,\r\n \"allgather_bucket_size\": 5e8,\r\n \"overlap_comm\": false,\r\n \"reduce_scatter\": true,\r\n \"reduce_bucket_size\": \"auto\",\r\n \"contiguous_gradients\": true,\r\n \"stage3_gather_16bit_weights_on_model_save\": false,\r\n \"offload_optimizer\": {\r\n \"device\": \"none\"\r\n },\r\n \"offload_param\": {\r\n \"device\": \"none\"\r\n }\r\n },\r\n \"gradient_clipping\": 1.0,\r\n \"gradient_accumulation_steps\": 2,\r\n \"train_batch_size\": \"auto\",\r\n \"train_micro_batch_size_per_gpu\": \"auto\",\r\n \"steps_per_print\": 2000000\r\n}\r\n\r\ncompute_environment: LOCAL_MACHINE\r\ndeepspeed_config:\r\n deepspeed_multinode_launcher: standard\r\n deepspeed_config_file: ./configs/vopt-large-z3/ds_config.json\r\n zero3_init_flag: true\r\ndistributed_type: DEEPSPEED\r\nfsdp_config: {}\r\nmachine_rank: 0\r\nmain_process_ip: null\r\nmain_process_port: null\r\nmain_training_function: main\r\nnum_machines: 1\r\nnum_processes: 1\r\n#num_machines: 20\r\n#num_processes: 80\r\nuse_cpu: false\r\n\r\n\r\n```", "from_author": false }, { "body": "@pacman100 default needs to be 1 instead of `None` :)", "from_author": false }, { "body": "Also clarifying defaults for args in this PR. 
Now, `accelerate launch --use_deepspeed --help` shows below output wherein default value info is specified `If unspecified, will default to *`:\r\n```\r\n...\r\n\r\nDeepSpeed Arguments:\r\n Arguments related to DeepSpeed.\r\n\r\n --deepspeed_config_file DEEPSPEED_CONFIG_FILE\r\n DeepSpeed config file.\r\n --zero_stage ZERO_STAGE\r\n DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag\r\n is passed). If unspecified, will default to `2`.\r\n --offload_optimizer_device OFFLOAD_OPTIMIZER_DEVICE\r\n Decides where (none|cpu|nvme) to offload optimizer states (useful only\r\n when `use_deepspeed` flag is passed). If unspecified, will default to\r\n `none`.\r\n --offload_param_device OFFLOAD_PARAM_DEVICE\r\n Decides where (none|cpu|nvme) to offload parameters (useful only when\r\n `use_deepspeed` flag is passed). If unspecified, will default to `none`.\r\n --gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS\r\n No of gradient_accumulation_steps used in your training script (useful\r\n only when `use_deepspeed` flag is passed). If unspecified, will default to\r\n `1`.\r\n --gradient_clipping GRADIENT_CLIPPING\r\n gradient clipping value used in your training script (useful only when\r\n `use_deepspeed` flag is passed). If unspecified, will default to `1.0`.\r\n --zero3_init_flag ZERO3_INIT_FLAG\r\n Decides Whether (true|false) to enable `deepspeed.zero.Init` for\r\n constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.\r\n If unspecified, will default to `true`.\r\n --zero3_save_16bit_model ZERO3_SAVE_16BIT_MODEL\r\n Decides Whether (true|false) to save 16-bit model weights when using ZeRO\r\n Stage-3. Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will\r\n default to `false`.\r\n --deepspeed_hostfile DEEPSPEED_HOSTFILE\r\n DeepSpeed hostfile for configuring multi-node compute resources.\r\n --deepspeed_exclusion_filter DEEPSPEED_EXCLUSION_FILTER\r\n DeepSpeed exclusion filter string when using mutli-node setup.\r\n --deepspeed_inclusion_filter DEEPSPEED_INCLUSION_FILTER\r\n DeepSpeed inclusion filter string when using mutli-node setup.\r\n --deepspeed_multinode_launcher DEEPSPEED_MULTINODE_LAUNCHER\r\n DeepSpeed multi-node launcher to use. If unspecified, will default to\r\n `pdsh`.\r\n\r\n```", "from_author": true }, { "body": "> ```shell\r\n> ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be \r\n> ignored: ['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', \r\n> 'offload_optimizer_device', 'offload_param_device', 'zero3_save_16bit_model', 'mixed_precision'].\r\n> Please specify them appropriately in the DeepSpeed config file.\r\n> If you are using accelerate config file, set `mixed_precision=no` and remove others config variables\r\n> mentioned in the above specified list; else don't specify these config variables in `accelerate \r\n> launch` command. \r\n> The easiest method is to create new config following the questionnaire via `accelerate config`.\r\n> It will only ask for the necessary config variables when using `deepspeed_config_file`.\r\n> ```\r\n\r\nNow, the error will be this", "from_author": true }, { "body": "> but really what we want is one definitive source of information.\r\n\r\nHello @stas00, let us know if this addresses the issue. 
When `deepspeed_config_file` is specified, it is the single definitive source of information and error is raised when duplicates are found either in accelerate config file or through the arguments of `accelerate launch` command", "from_author": true }, { "body": "> Will this still work with `auto` entries in `ds_config.json`\r\n\r\nHello Stas, they will work as before because nothing is changing other than the fact that an error is raised if their is any duplicate entry in `accelerate config` with things that are in `deepspeed_config_file`. Here, these fields which can be duplicates are never `auto` in either case and hence fields that had `auto` support won't be impacted.", "from_author": true }, { "body": "Thank you for confirming that, @pacman100 - let me try it out as it's easier to see things in action.", "from_author": false }, { "body": "But this shows otherwise:\r\n```\r\n File \"/mnt/nvme0/code/huggingface/accelerate-master/src/accelerate/utils/dataclasses.py\", line 409, in __post_init__\r\n raise ValueError(\"gradient_accumulation_steps cannot be set to 'auto' in the DeepSpeed config.\")\r\nValueError: gradient_accumulation_steps cannot be set to 'auto' in the DeepSpeed config.\r\nERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: 1) local_rank: 0 (pid\r\n```\r\n\r\nwhy can't it be `auto`? I'd expect it to be set either (1) to the default value or (2) the value passed via the `--gradient_accumulation_steps` cmd line arg.\r\n\r\nI wonder if we are having a miscommunication here. I brought up the issue of duplicity of the 2 styles of settings the ds config, since accelerate used its own config files from the beginning, but I have never suggested that setting values via cmd line args support should be dropped.", "from_author": false }, { "body": "Regarding `gradient_accumulation_steps` and `auto`, that piece of code wasn't changed at all and behaviour is the same as before. If one doesn't specify that entry in the config file, it is set to default of `1`, if it is `auto` error is raised else thale given value is used. When not using `deepspeed_config_file`, it is asked when using `accelerate config` cmd.", "from_author": true }, { "body": "> Regarding `gradient_accumulation_steps` and `auto`, that piece of code wasn't changed at all and behaviour is the same as before. If one doesn't specify that entry in the config file, it is set to default of `1`, if it is `auto` error is raised else thale given value is used. When not using `deepspeed_config_file`, it is asked when using `accelerate config` cmd.\r\n\r\nOK, so your logic is different from HF Trainer then.\r\n\r\nThe HF Trainer was directing cmd args into the config file's `auto` values so that the user could override them via cmd line args.\r\n\r\nI'm not saying the 2 logics have to match. If I am not mistaken the accelerate logic is less flexible, but it's ok if you prefer it that way. \r\n\r\nIn HF Trainer the `auto` feature was designed to be used:\r\n1. when the value can't be known before running - \"boot\"-time calculated configs\r\n2. values to be set via cmd line args and defaults", "from_author": false }, { "body": "As accelerate is meant to work with all models apart from Transformers and user being in control of the training loop, they are in charge of all the arguments and the naming convention of arguments will be different across different users. 
On the other hand, in Trainer, users are restricted to a given args set and as such those can be used to fill the DeepSpeed config due to clear mapping between args and DS config params. The idea is that artifacts sent to `accelerator.prepare` have the params required by DS config and we exactly know the mapping between them and makes filling of the params independent of the user's training loop and their arguments naming convention.\n\nIn accelerate, the `auto` values are those that can be filled via artifacts being sent to `accelerator.prepare` as all the other places, the user has complete control over the training loop, argument naming and filling. The user still has flexibility to fill in all the `auto` values themselves as mentioned here https://github.com/huggingface/accelerate/pull/676#issuecomment-1318139353", "from_author": true }, { "body": "> As accelerate is meant to work with all models apart from Transformers and user being in control of the training loop, they are in charge of all the arguments and the naming convention of arguments will be different across different users. On the other hand, in Trainer, users are restricted to a given args set and as such those can be used to fill the DeepSpeed config due to clear mapping between args and DS config params. The idea is that artifacts sent to `accelerator.prepare` have the params required by DS config and we exactly know the mapping between them and makes filling of the params independent of the user's training loop and their arguments naming convention.\r\n\r\nThank you for explaining this to me, Sourab, but I'm having a hard time following how Accelerate is any different from HF Trainer wrt sending cmd line arg values to the unfilled out config values in ds_config. e.g. the Accelerate launcher provides an explicit list of cmd line args for the deepspeed use. There is a 1:1 mapping here as well. Could you please explain how this is different from the HF Trainer?\r\n\r\nBut as I said above it's totally fine if you prefer to do it this way, Sourab. This doesn't prevent users from doing what they need.\r\n\r\n> In accelerate, the auto values are those that can be filled via artifacts being sent to accelerator.prepare as all the other places, the user has complete control over the training loop, argument naming and filling. The user still has flexibility to fill in all the auto values themselves as mentioned here https://github.com/huggingface/accelerate/pull/676#issuecomment-1318139353\r\n\r\nUnderstood. More work, but doable. Thank you for the explanations.\r\n\r\n", "from_author": false }, { "body": "> Could you please explain how this is different from the HF Trainer?\r\n\r\nUsers can have `bs` or `batch_size` as cmd arguments in their code and as such we can't fill ds config's `train_micro_batch_size_per_gpu` whereas Trainer always maps `args.per_device_train_batch_size` to it. The same reason can go for other configs. \r\n\r\nPlease note that `accelerate launch` cmd args are primarily used for setting accelerate config's deepspeed fields rather than setting `auto` values of `deepspeed_config_file`. 
Now I understood that you meant using `accelerate launch` cmd args for filling in `auto` values of `deepspeed_config_file` and I've made respective changes.\r\n\r\nLatest changes:\r\n\r\nCode `test.py`:\r\n```python\r\nfrom accelerate import Accelerator\r\nfrom accelerate.state import AcceleratorState\r\n\r\ndef main():\r\n accelerator = Accelerator()\r\n accelerator.print(f\"{AcceleratorState()}\")\r\nif __name__ == \"__main__\":\r\n main()\r\n```\r\n\r\nScenario 1: manually tampered accelerate config file having `deepspeed_config_file` along with other entries.\r\n\r\n1. `accelerate config`:\r\n```yaml\r\ncommand_file: null\r\ncommands: null\r\ncompute_environment: LOCAL_MACHINE\r\ndeepspeed_config:\r\n gradient_accumulation_steps: 1\r\n gradient_clipping: 1.0\r\n offload_optimizer_device: 'cpu'\r\n offload_param_device: 'cpu'\r\n zero3_init_flag: true\r\n zero3_save_16bit_model: true\r\n zero_stage: 3\r\n deepspeed_config_file: 'ds_config.json'\r\ndistributed_type: DEEPSPEED\r\ndowncast_bf16: 'no'\r\ndynamo_backend: 'NO'\r\nfsdp_config: {}\r\ngpu_ids: null\r\nmachine_rank: 0\r\nmain_process_ip: null\r\nmain_process_port: null\r\nmain_training_function: main\r\nmegatron_lm_config: {}\r\nnum_machines: 1\r\nnum_processes: 2\r\nrdzv_backend: static\r\nsame_network: true\r\ntpu_name: null\r\ntpu_zone: null\r\nuse_cpu: false\r\n```\r\n\r\n2. `ds_config.json`:\r\n```json\r\n{\r\n \"bf16\": {\r\n \"enabled\": true\r\n },\r\n \"zero_optimization\": {\r\n \"stage\": 3,\r\n \"stage3_gather_16bit_weights_on_model_save\": false,\r\n \"offload_optimizer\": {\r\n \"device\": \"none\"\r\n },\r\n \"offload_param\": {\r\n \"device\": \"none\"\r\n }\r\n },\r\n \"gradient_clipping\": 1.0,\r\n \"train_batch_size\": \"auto\",\r\n \"train_micro_batch_size_per_gpu\": \"auto\",\r\n \"gradient_accumulation_steps\": 10,\r\n \"steps_per_print\": 2000000\r\n}\r\n```\r\n\r\n3. Output of `accelerate launch test.py`:\r\n```\r\nValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: \r\n['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', \r\n'zero3_save_16bit_model', 'mixed_precision'].\r\nPlease specify them appropriately in the DeepSpeed config file.\r\nIf you are using an accelerate config file, remove others config variables mentioned in the above specified list.\r\nThe easiest method is to create a new config following the questionnaire via `accelerate config`.\r\nIt will only ask for the necessary config variables when using `deepspeed_config_file`.\r\n```\r\n\r\nScenario 2: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown.\r\n1. Run `accelerate config`:\r\n```\r\n$ accelerate config\r\n-------------------------------------------------------------------------------------------------------------------------------In which compute environment are you running?\r\nThis machine \r\n-------------------------------------------------------------------------------------------------------------------------------Which type of machine are you using? \r\nmulti-GPU \r\nHow many different machines will you use (use more than 1 for multi-node training)? [1]: \r\nDo you wish to optimize your script with torch dynamo?[yes/NO]: \r\nDo you want to use DeepSpeed? [yes/NO]: yes \r\nDo you want to specify a json file to a DeepSpeed config? 
[yes/NO]: yes \r\nPlease enter the path to the json DeepSpeed config file: ds_config.json \r\nDo you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes\r\nHow many GPU(s) should be used for distributed training? [1]:4\r\naccelerate configuration saved at ds_config_sample.yaml\r\n```\r\n\r\n2. `accelerate config`:\r\n```yaml\r\ncompute_environment: LOCAL_MACHINE\r\ndeepspeed_config:\r\n deepspeed_config_file: ds_config.json\r\n zero3_init_flag: true\r\ndistributed_type: DEEPSPEED\r\ndowncast_bf16: 'no'\r\ndynamo_backend: 'NO'\r\nfsdp_config: {}\r\nmachine_rank: 0\r\nmain_training_function: main\r\nmegatron_lm_config: {}\r\nnum_machines: 1\r\nnum_processes: 4\r\nrdzv_backend: static\r\nsame_network: true\r\nuse_cpu: false\r\n```\r\n\r\n3. Output of `accelerate launch test.py`:\r\n```\r\nDistributed environment: DEEPSPEED Backend: nccl\r\nNum processes: 4\r\nProcess index: 0\r\nLocal process index: 0\r\nDevice: cuda:0\r\nMixed precision type: bf16\r\nds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}\r\n```\r\n\r\nScenario 3: Setting the `accelerate launch` cmd args related to deepspeed as `auto` in `deepspeed_config_file` and check that things work as expected.\r\n1. new `ds_config.json` with `auto` for the `accelerate launch` deepspeed cmd args:\r\n```json\r\n{\r\n \"bf16\": {\r\n \"enabled\": \"auto\"\r\n },\r\n \"zero_optimization\": {\r\n \"stage\": \"auto\",\r\n \"stage3_gather_16bit_weights_on_model_save\": \"auto\",\r\n \"offload_optimizer\": {\r\n \"device\": \"auto\"\r\n },\r\n \"offload_param\": {\r\n \"device\": \"auto\"\r\n }\r\n },\r\n \"gradient_clipping\": \"auto\",\r\n \"train_batch_size\": \"auto\",\r\n \"train_micro_batch_size_per_gpu\": \"auto\",\r\n \"gradient_accumulation_steps\": \"auto\",\r\n \"steps_per_print\": 2000000\r\n}\r\n```\r\n\r\n2. Output of `accelerate launch --mixed_precision=\"fp16\" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device=\"cpu\" --offload_optimizer_device=\"nvme\" --zero3_save_16bit_model=\"true\" test.py`:\r\n```bash\r\nDistributed environment: DEEPSPEED Backend: nccl\r\nNum processes: 4\r\nProcess index: 0\r\nLocal process index: 0\r\nDevice: cuda:0\r\nMixed precision type: fp16\r\nds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}\r\n```\r\n\r\n**Note**: Remaining `auto` values are handled in `accelerator.prepare()` call.\r\n\r\n\r\n", "from_author": true }, { "body": "Looks fantastic, Sourab! Thank you for the improvements and taking the time to layout out the different scenarios - if I'm not mistaken those would make for perfect additions to the documentation if it resonates. 
(at the very least the last one to demo how `auto` values work and why would one want to use those).\r\n\r\nBTW, the config generates things like:\r\n```\r\nfsdp_config: {}\r\nmegatron_lm_config: {}\r\n```\r\nwhy not just skip parts that the user hasn't asked for? It just makes the config scarier than it is, no? I'm asking since when I first looked at it I wasn't at all sure which of the empty placeholders were safe to remove and which aren't. My personal preference is for active config - that is to only ever list config entries that I work with and any defaults should be just that defaults and not be listed at all. Which I suppose isn't the case with typical configs where everything is listed out whether it's being used or not.\r\n\r\nAnd I can of course remove all those, so definitely it's not an issue, I'm just asking if my thinking resonates with you.", "from_author": false }, { "body": "Sourab, I found one more ambiguous combo in one of our tests:\r\n\r\n```\r\n zero3_init_flag: true\r\n zero_stage: 2\r\n```\r\n\r\nThis combo is quietly getting accepted. I'm concerned that a developer may see `zero3_init_flag: true` and think it's zero3?\r\n\r\nDo you think accelerate should assert when `zero3_init_flag==True` is used with `stage < 3`?", "from_author": false }, { "body": "Hello @stas00, with current setup below warning is given which I think is fine:\r\n```bash\r\nUserWarning: DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\r\n```", "from_author": true }, { "body": "oh boy. I didn't see it. :(\r\n\r\nIf a tree falls in a forest and no one is around to hear it, does it make a sound?\r\n\r\nI guess I need to start using this pragma to turn warnings into errors, but then some warnings can't be acted upon :(\r\n\r\n```\r\nimport warnings\r\nwarnings.filterwarnings(\"error\")\r\n```\r\n\r\nThis is for example an even larger issue for tests, where distributed setup hides most warnings or again there are too many warnings to see anything.\r\n", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/941", "pr_id": 1176040949 }, { "diff": "diff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex 6ea99f9bd..13a8d7afa 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -206,7 +206,7 @@ Now that this is done, you can run your script with the following command:\n accelerate launch path_to_script.py --args_for_the_script\n ```\n \n-If you stored the config file in a non-default location, you can indicate it to the launcher like his:\n+If you stored the config file in a non-default location, you can indicate it to the launcher like this:\n \n ```bash\n accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks for the fix! 
", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/939", "pr_id": 1175473268 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 8a45856cd..7dafed0e5 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -711,7 +711,9 @@ def prepare_data_loader(\n # Need to provide batch_size as batch_sampler is None for Iterable dataset\n if new_batch_sampler is None:\n kwargs[\"drop_last\"] = dataloader.drop_last\n- kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\n+ kwargs[\"batch_size\"] = (\n+ dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size\n+ )\n \n if dispatch_batches:\n kwargs.pop(\"generator\")\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Looks like the right fix to me, thanks!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/937", "pr_id": 1174853834 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex ff880cffd..ec5041bfa 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -473,17 +473,7 @@ def is_last_process(self):\n \n @property\n def mixed_precision(self):\n- if self.distributed_type == DistributedType.DEEPSPEED:\n- config = self.state.deepspeed_plugin.deepspeed_config\n- if config.get(\"fp16\", {}).get(\"enabled\", False):\n- mixed_precision = \"fp16\"\n- elif config.get(\"bf16\", {}).get(\"enabled\", False):\n- mixed_precision = \"bf16\"\n- else:\n- mixed_precision = \"no\"\n- else:\n- mixed_precision = self.state.mixed_precision\n- return mixed_precision\n+ return self.state.mixed_precision\n \n def on_main_process(func):\n \"\"\"\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex a8a5e7960..31a6d64a2 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -104,7 +104,7 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.mixed_precision = mixed_precision\n+ self._mixed_precision = mixed_precision\n elif is_tpu_available() and not cpu:\n self.distributed_type = DistributedType.TPU\n self.num_processes = xm.xrt_world_size()\n@@ -120,7 +120,7 @@ def __init__(\n os.environ[\"XLA_USE_BF16\"] = str(1)\n os.environ[\"XLA_DOWNCAST_BF16\"] = str(0)\n self.downcast_bfloat = False\n- self.mixed_precision = mixed_precision\n+ self._mixed_precision = mixed_precision\n elif os.environ.get(\"ACCELERATE_USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\n assert (\n is_deepspeed_available()\n@@ -142,7 +142,7 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.mixed_precision = \"no\" # deepspeed handles mixed_precision using deepspeed_config\n+ self._mixed_precision = \"no\" # deepspeed handles mixed_precision using deepspeed_config\n self.deepspeed_plugin = deepspeed_plugin\n elif int(os.environ.get(\"LOCAL_RANK\", -1)) != -1 and not cpu:\n self.distributed_type = DistributedType.MULTI_GPU\n@@ -154,15 +154,15 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = 
torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.mixed_precision = mixed_precision\n+ self._mixed_precision = mixed_precision\n if os.environ.get(\"ACCELERATE_USE_FSDP\", \"false\") == \"true\":\n self.distributed_type = DistributedType.FSDP\n- if self.mixed_precision != \"no\":\n- fsdp_plugin.set_mixed_precision(self.mixed_precision)\n+ if self._mixed_precision != \"no\":\n+ fsdp_plugin.set_mixed_precision(self._mixed_precision)\n self.fsdp_plugin = fsdp_plugin\n if os.environ.get(\"ACCELERATE_USE_MEGATRON_LM\", \"false\") == \"true\":\n self.distributed_type = DistributedType.MEGATRON_LM\n- megatron_lm_plugin.set_mixed_precision(self.mixed_precision)\n+ megatron_lm_plugin.set_mixed_precision(self._mixed_precision)\n self.megatron_lm_plugin = megatron_lm_plugin\n elif get_int_from_env([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"], 1) > 1:\n self.distributed_type = DistributedType.MULTI_CPU\n@@ -204,7 +204,7 @@ def __init__(\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = local_rank\n self.device = torch.device(\"cpu\")\n- self.mixed_precision = mixed_precision\n+ self._mixed_precision = mixed_precision\n else:\n self.distributed_type = DistributedType.NO\n self.num_processes = 1\n@@ -237,9 +237,13 @@ def __init__(\n self.device = torch.device(\"cpu\")\n else:\n self.device = torch.device(\"cuda\")\n- self.mixed_precision = mixed_precision\n+ self._mixed_precision = mixed_precision\n \n- if self.dynamo_backend != DynamoBackend.NO and self.mixed_precision == \"no\" and self.device.type == \"cuda\":\n+ if (\n+ self.dynamo_backend != DynamoBackend.NO\n+ and self._mixed_precision == \"no\"\n+ and self.device.type == \"cuda\"\n+ ):\n torch.backends.cuda.matmul.allow_tf32 = True\n self.initialized = True\n \n@@ -252,17 +256,30 @@ def __repr__(self):\n f\"Process index: {self.process_index}\\n\"\n f\"Local process index: {self.local_process_index}\\n\"\n f\"Device: {self.device}\\n\"\n+ f\"Mixed precision type: {mixed_precision}\\n\"\n )\n if self.distributed_type == DistributedType.DEEPSPEED:\n repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\n\"\n- else:\n- repr += f\"Mixed precision type: {mixed_precision}\\n\"\n return repr\n \n # For backward compatibility\n @property\n def use_fp16(self):\n- return self.mixed_precision != \"no\"\n+ return self._mixed_precision != \"no\"\n+\n+ @property\n+ def mixed_precision(self):\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ config = self.deepspeed_plugin.deepspeed_config\n+ if config.get(\"fp16\", {}).get(\"enabled\", False):\n+ mixed_precision = \"fp16\"\n+ elif config.get(\"bf16\", {}).get(\"enabled\", False):\n+ mixed_precision = \"bf16\"\n+ else:\n+ mixed_precision = \"no\"\n+ else:\n+ mixed_precision = self._mixed_precision\n+ return mixed_precision\n \n @staticmethod\n def _reset_state():\n@@ -275,7 +292,7 @@ def _check_initialized(self, mixed_precision=None, cpu=None):\n err = \"AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerate()`.\"\n if cpu and self.device.type != \"cpu\":\n raise ValueError(err.format(flag=\"cpu=True\"))\n- if mixed_precision is not None and mixed_precision != self.mixed_precision:\n+ if mixed_precision is not None and mixed_precision != self._mixed_precision:\n raise ValueError(err.format(flag=f\"mixed_precision='{mixed_precision}'\"))\n \n \n", "code_comments": [], "context": [ { "body": "The docs for 
this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_935). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "> This is not ideal in terms of naming. I'm wondering if we shouldn't rename the current attribute `_mixed_precision` and have the property named `mixed_precision` which would be clearer. There might be a bit of code to adapt, but I think it's worth the effort, no?\r\n\r\nYes, I thought of that but changed my mind in order to be least invasive in terms of the changes required. Addressed this in latest commit. Thank you.", "from_author": true }, { "body": "Example with recent changes:\r\n\r\nCode:\r\n```\r\nfrom accelerate import Accelerator\r\nfrom accelerate.state import AcceleratorState\r\n\r\ndef main():\r\n accelerator = Accelerator()\r\n accelerator.print(f\"{AcceleratorState()}\")\r\n accelerator.print(f\"{AcceleratorState().mixed_precision=}\")\r\n accelerator.print(f\"{accelerator.mixed_precision=}\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n```\r\n\r\nconfig:\r\n```yaml\r\ncommand_file: null\r\ncommands: null\r\ncompute_environment: LOCAL_MACHINE\r\ndeepspeed_config:\r\n gradient_accumulation_steps: 1\r\n gradient_clipping: 1.0\r\n offload_optimizer_device: 'cpu'\r\n offload_param_device: 'cpu'\r\n zero3_init_flag: true\r\n zero3_save_16bit_model: true\r\n zero_stage: 3\r\ndistributed_type: DEEPSPEED\r\ndowncast_bf16: 'no'\r\ndynamo_backend: 'NO'\r\nfsdp_config: {}\r\ngpu_ids: null\r\nmachine_rank: 0\r\nmain_process_ip: null\r\nmain_process_port: null\r\nmain_training_function: main\r\nmegatron_lm_config: {}\r\nmixed_precision: 'bf16'\r\nnum_machines: 1\r\nnum_processes: 2\r\nrdzv_backend: static\r\nsame_network: true\r\ntpu_name: null\r\ntpu_zone: null\r\nuse_cpu: false\r\n```\r\n\r\nOutput\r\n```bash\r\nDistributed environment: DEEPSPEED Backend: nccl\r\nNum processes: 2\r\nProcess index: 0\r\nLocal process index: 0\r\nDevice: cuda:0\r\nMixed precision type: bf16\r\nds_config: {'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 1, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'cpu'}, 'offload_param': {'device': 'cpu'}, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_clipping': 1.0, 'steps_per_print': inf, 'bf16': {'enabled': True}}\r\n\r\nAcceleratorState().mixed_precision='bf16'\r\naccelerator.mixed_precision='bf16'\r\n```", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/935", "pr_id": 1173355279 }, { "diff": "diff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex dc8883425..4f9617711 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -23,6 +23,7 @@\n import os\n import typing\n import warnings\n+from contextlib import contextmanager\n from dataclasses import dataclass, field\n from datetime import timedelta\n from distutils.util import strtobool\n@@ -529,6 +530,23 @@ def set_deepspeed_weakref(self):\n \n self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\n \n+ def is_zero3_init_enabled(self):\n+ return self.zero3_init_flag\n+\n+ @contextmanager\n+ def zero3_init_context_manager(self, enable=False):\n+ old = self.zero3_init_flag\n+ if old == enable:\n+ yield\n+ else:\n+ self.zero3_init_flag = enable\n+ self.dschf = None\n+ self.set_deepspeed_weakref()\n+ yield\n+ self.zero3_init_flag = old\n+ self.dschf = None\n+ self.set_deepspeed_weakref()\n+\n \n 
@dataclass\n class FullyShardedDataParallelPlugin:\n", "code_comments": [ { "body": "Might be clearer to have an `else` block here instead of the early `return`.", "diff_hunk": "@@ -529,6 +530,23 @@ def set_deepspeed_weakref(self):\n \n self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\n \n+ def is_zero3_init_enabled(self):\n+ return self.zero3_init_flag\n+\n+ @contextmanager\n+ def set_zero3_init(self, enable=False):\n+ old = self.zero3_init_flag\n+ if old == enable:\n+ yield\n+ return\n+ self.zero3_init_flag = enable\n+ self.dschf = None\n+ self.set_deepspeed_weakref()\n+ yield\n+ self.zero3_init_flag = old\n+ self.dschf = None\n+ self.set_deepspeed_weakref()", "from_author": false }, { "body": "Done. ", "diff_hunk": "@@ -529,6 +530,23 @@ def set_deepspeed_weakref(self):\n \n self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\n \n+ def is_zero3_init_enabled(self):\n+ return self.zero3_init_flag\n+\n+ @contextmanager\n+ def set_zero3_init(self, enable=False):\n+ old = self.zero3_init_flag\n+ if old == enable:\n+ yield\n+ return\n+ self.zero3_init_flag = enable\n+ self.dschf = None\n+ self.set_deepspeed_weakref()\n+ yield\n+ self.zero3_init_flag = old\n+ self.dschf = None\n+ self.set_deepspeed_weakref()", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "here is how I used your new method:\r\n\r\n```\r\n from transformer.utils import ContextManagers\r\n deepspeed_plugin = get_deepspeed_plugin()\r\n if deepspeed_plugin is not None:\r\n zero_init_disabled_context = [deepspeed_plugin.set_zero3_init(enable=False)]\r\n else:\r\n zero_init_disabled_context = []\r\n\r\n with ContextManagers(zero_init_disabled_context):\r\n config = AutoConfig.from_pretrained(vision_model_name, **vision_model_params)\r\n model = AutoModel.from_config(config, torch_dtype=torch_dtype)\r\n```", "from_author": false }, { "body": "> Would it be more intuitive to call it `zero_init_context_manager`?\r\n\r\nrenamed it to `zero3_init_context_manager`", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/932", "pr_id": 1171706400 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex fd5ad7d60..04ff21127 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\n )\n deepspeed_args.add_argument(\n \"--zero3_init_flag\",\n- default=None,\n+ default=\"true\",\n type=str,\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n )\n deepspeed_args.add_argument(\n \"--zero3_save_16bit_model\",\n- default=None,\n+ default=\"false\",\n type=str,\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
\"\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n@@ -759,15 +759,15 @@ def deepspeed_launcher(args):\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\n- current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n- current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n- current_env[\"GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\n- current_env[\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n- current_env[\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n- current_env[\"DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\n- current_env[\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n+ current_env[\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n+ current_env[\"ACCELERATE_GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n+ current_env[\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\n+ current_env[\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n if args.deepspeed_config_file is not None:\n- current_env[\"DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\n+ current_env[\"ACCELERATE_DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\n \n if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n with open(\".deepspeed_env\", \"a\") as f:\n@@ -1041,8 +1041,7 @@ def launch_command(args):\n for name, attr in defaults.__dict__.items():\n if isinstance(attr, dict):\n for k in defaults.deepspeed_config:\n- if getattr(args, k) is None:\n- setattr(args, k, defaults.deepspeed_config[k])\n+ setattr(args, k, defaults.deepspeed_config[k])\n for k in defaults.fsdp_config:\n arg_to_set = k\n if \"fsdp\" not in arg_to_set:\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex eafad8a09..eea8c3ab3 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -361,7 +361,7 @@ def __post_init__(self):\n from .deepspeed import HfDeepSpeedConfig\n \n if self.hf_ds_config is None:\n- self.hf_ds_config = os.environ.get(\"DEEPSPEED_CONFIG_FILE\", \"none\")\n+ self.hf_ds_config = os.environ.get(\"ACCELERATE_DEEPSPEED_CONFIG_FILE\", \"none\")\n if (\n isinstance(self.hf_ds_config, dict)\n or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != \"none\")\n@@ -377,24 +377,26 @@ def __post_init__(self):\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\n else:\n if self.gradient_accumulation_steps is None:\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\n \n if self.gradient_clipping is None:\n- gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\n if gradient_clipping != 
\"none\":\n self.gradient_clipping = float(gradient_clipping)\n \n if self.zero_stage is None:\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\n \n if self.offload_optimizer_device is None:\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n \n if self.offload_param_device is None:\n- self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n \n if self.zero3_save_16bit_model is None:\n- self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ self.zero3_save_16bit_model = (\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ )\n \n config = {\n \"train_batch_size\": \"auto\",\n@@ -417,7 +419,7 @@ def __post_init__(self):\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n if self.zero3_init_flag and not self.hf_ds_config.is_zero3():\n warnings.warn(\"DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\")\n self.zero3_init_flag = False\n", "code_comments": [ { "body": "this logic needs to be recoded since the default is no longer `None` and thus this code will never run.", "diff_hunk": "@@ -377,24 +377,26 @@ def __post_init__(self):\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\n else:\n if self.gradient_accumulation_steps is None:\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\n \n if self.gradient_clipping is None:\n- gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\n if gradient_clipping != \"none\":\n self.gradient_clipping = float(gradient_clipping)\n \n if self.zero_stage is None:\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\n \n if self.offload_optimizer_device is None:\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n \n if self.offload_param_device is None:\n- self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n \n if self.zero3_save_16bit_model is None:\n- self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ self.zero3_save_16bit_model = (\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ )", "from_author": false }, { "body": "this logic 
needs to be recoded since the default is no longer `None` and thus this code will never run.", "diff_hunk": "@@ -417,7 +419,7 @@ def __post_init__(self):\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"", "from_author": false }, { "body": "maybe add to `help` that `ACCELERATE_DEEPSPEED_ZERO3_INIT` env var can override it?", "diff_hunk": "@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\n )\n deepspeed_args.add_argument(\n \"--zero3_init_flag\",\n- default=None,\n+ default=\"true\",\n type=str,\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",", "from_author": false }, { "body": "maybe add to `help` that `ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL` env var can override it?", "diff_hunk": "@@ -343,14 +343,14 @@ def launch_command_parser(subparsers=None):\n )\n deepspeed_args.add_argument(\n \"--zero3_init_flag\",\n- default=None,\n+ default=\"true\",\n type=str,\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n )\n deepspeed_args.add_argument(\n \"--zero3_save_16bit_model\",\n- default=None,\n+ default=\"false\",\n type=str,\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
\"\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",", "from_author": false }, { "body": "Hello, here dataclass object attributes are still `None` initially if the user hasn't passed them explicitly by directly creating `DeepSpeedPlugin` object. In `__post_init__` call, the env variables which were set via [default] cmd args are used for setting dataclass object attributes.", "diff_hunk": "@@ -377,24 +377,26 @@ def __post_init__(self):\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\n else:\n if self.gradient_accumulation_steps is None:\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\n \n if self.gradient_clipping is None:\n- gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\n if gradient_clipping != \"none\":\n self.gradient_clipping = float(gradient_clipping)\n \n if self.zero_stage is None:\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\n \n if self.offload_optimizer_device is None:\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n \n if self.offload_param_device is None:\n- self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n \n if self.zero3_save_16bit_model is None:\n- self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ self.zero3_save_16bit_model = (\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ )", "from_author": true }, { "body": "Same explanation as above", "diff_hunk": "@@ -417,7 +419,7 @@ def __post_init__(self):\n self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n- self.zero3_init_flag = os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ self.zero3_init_flag = os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"", "from_author": true }, { "body": "my bad then! 
my apologies for the false alert - thank you for explaining, @pacman100!", "diff_hunk": "@@ -377,24 +377,26 @@ def __post_init__(self):\n raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\n else:\n if self.gradient_accumulation_steps is None:\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n+ self.gradient_accumulation_steps = int(os.environ.get(\"ACCELERATE_GRADIENT_ACCUMULATION_STEPS\", 1))\n \n if self.gradient_clipping is None:\n- gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\n+ gradient_clipping = os.environ.get(\"ACCELERATE_GRADIENT_CLIPPING\", \"none\")\n if gradient_clipping != \"none\":\n self.gradient_clipping = float(gradient_clipping)\n \n if self.zero_stage is None:\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+ self.zero_stage = int(os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO_STAGE\", 2))\n \n if self.offload_optimizer_device is None:\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+ self.offload_optimizer_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n \n if self.offload_param_device is None:\n- self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n+ self.offload_param_device = os.environ.get(\"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n \n if self.zero3_save_16bit_model is None:\n- self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ self.zero3_save_16bit_model = (\n+ os.environ.get(\"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+ )", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/928", "pr_id": 1168673450 }, { "diff": "diff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex d22006d54..9129fc1a5 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -5,7 +5,14 @@\n __version__ = \"0.15.0.dev0\"\n \n from .accelerator import Accelerator\n-from .big_modeling import cpu_offload, disk_offload, dispatch_model, init_empty_weights, load_checkpoint_and_dispatch\n+from .big_modeling import (\n+ cpu_offload,\n+ disk_offload,\n+ dispatch_model,\n+ init_empty_weights,\n+ init_on_device,\n+ load_checkpoint_and_dispatch,\n+)\n from .launchers import debug_launcher, notebook_launcher\n from .utils import (\n DeepSpeedPlugin,\ndiff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 24857ad25..d3247a464 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -62,6 +62,31 @@ def init_empty_weights(include_buffers: bool = False):\n \"\"\"\n if not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\"Initializing empty weights to a meta device requires torch >= 1.9.0\")\n+ with init_on_device(torch.device(\"meta\"), include_buffers=include_buffers) as f:\n+ yield f\n+\n+\n+@contextmanager\n+def init_on_device(device: torch.device, include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the specified device.\n+\n+ Args:\n+ device (`torch.device`):\n+ Device to initialize all parameters on.\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on 
the meta device while initializing.\n+\n+ Example:\n+\n+ ```python\n+ import torch.nn as nn\n+ from accelerate import init_on_device\n+\n+ with init_on_device(device=torch.device(\"cuda\")):\n+ tst = nn.Liner(100, 100) # on `cuda` device\n+ ```\n+ \"\"\"\n old_register_parameter = nn.Module.register_parameter\n if include_buffers:\n old_register_buffer = nn.Module.register_buffer\n@@ -71,12 +96,12 @@ def register_empty_parameter(module, name, param):\n if param is not None:\n param_cls = type(module._parameters[name])\n kwargs = module._parameters[name].__dict__\n- module._parameters[name] = param_cls(module._parameters[name].to(torch.device(\"meta\")), **kwargs)\n+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)\n \n def register_empty_buffer(module, name, buffer):\n old_register_buffer(module, name, buffer)\n if buffer is not None:\n- module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+ module._buffers[name] = module._buffers[name].to(device)\n \n # Patch tensor creation\n if include_buffers:\n@@ -89,7 +114,7 @@ def register_empty_buffer(module, name, buffer):\n \n def patch_tensor_constructor(fn):\n def wrapper(*args, **kwargs):\n- kwargs[\"device\"] = torch.device(\"meta\")\n+ kwargs[\"device\"] = device\n return fn(*args, **kwargs)\n \n return wrapper\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex faf8a7da8..b58b932b1 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -8,6 +8,7 @@\n require_cpu,\n require_cuda,\n require_huggingface_suite,\n+ require_mps,\n require_multi_gpu,\n require_single_gpu,\n require_torch_min_version,\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 94e13412a..3a681bf32 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -87,6 +87,15 @@ def require_cuda(test_case):\n return unittest.skipUnless(torch.cuda.is_available(), \"test requires a GPU\")(test_case)\n \n \n+def require_mps(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps`\n+ backend.\n+ \"\"\"\n+ is_mps_supported = hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available()\n+ return unittest.skipUnless(is_mps_supported, \"test requires a `mps` backend support in `torch`\")(test_case)\n+\n+\n def require_huggingface_suite(test_case):\n \"\"\"\n Decorator marking a test that requires transformers and datasets. 
These tests are skipped when they are not.\ndiff --git a/tests/test_big_modeling.py b/tests/test_big_modeling.py\nindex 4f738f4d8..467a91424 100644\n--- a/tests/test_big_modeling.py\n+++ b/tests/test_big_modeling.py\n@@ -24,10 +24,11 @@\n disk_offload,\n dispatch_model,\n init_empty_weights,\n+ init_on_device,\n load_checkpoint_and_dispatch,\n )\n from accelerate.hooks import remove_hook_from_submodules\n-from accelerate.test_utils import require_cuda, require_multi_gpu, require_torch_min_version, slow\n+from accelerate.test_utils import require_cuda, require_mps, require_multi_gpu, require_torch_min_version, slow\n from accelerate.utils import offload_state_dict\n from transformers import AutoModelForCausalLM, AutoTokenizer\n \n@@ -109,6 +110,22 @@ def test_init_empty_weights_very_large_model(self):\n with init_empty_weights():\n _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n \n+ @require_cuda\n+ def test_init_on_device_cuda(self):\n+ device = torch.device(\"cuda:0\")\n+ with init_on_device(device):\n+ model = nn.Linear(10, 10)\n+ self.assertEqual(model.weight.device, device)\n+ self.assertEqual(model.weight.device, device)\n+\n+ @require_mps\n+ def test_init_on_device_mps(self):\n+ device = torch.device(\"mps:0\")\n+ with init_on_device(device):\n+ model = nn.Linear(10, 10)\n+ self.assertEqual(model.weight.device, device)\n+ self.assertEqual(model.weight.device, device)\n+\n def test_cpu_offload(self):\n model = ModelForTest()\n x = torch.randn(2, 3)\n", "code_comments": [ { "body": "Ah ah, that example needs adapting ;-p\r\n", "diff_hunk": "@@ -62,6 +62,32 @@ def init_empty_weights(include_buffers: bool = False):\n \"\"\"\n if not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\"Initializing empty weights to a meta device requires torch >= 1.9.0\")\n+ with init_on_device(torch.device(\"meta\"), include_buffers=include_buffers) as f:\n+ yield f\n+\n+\n+@contextmanager\n+def init_on_device(device: torch.device, include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the specified device.\n+\n+ Args:\n+ device (`torch.device`):\n+ Device to initialize all parameters on.\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```python\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_on_device(device=torch.device(\"cuda\")):\n+ tst = nn.Liner(100, 100) # on `cuda` device", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "I guess we can also extend to specific dtypes as well now.", "from_author": true }, { "body": "Which dtype did you have in mind? For floating types there is a default you can set in PyTorch directly.", "from_author": false }, { "body": "You mean https://pytorch.org/docs/stable/generated/torch.set_default_dtype.html ? Does fp16/bf16 also work? If so my bad.", "from_author": true }, { "body": "Yes it works pretty well. 
We use it in Transformers [here](https://github.com/huggingface/transformers/blob/17292440c069118fbdb992b9a17da2098fab5b87/src/transformers/modeling_utils.py#L1109) for instance.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/926", "pr_id": 1168181974 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 6b9825523..ccaafbc8e 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -28,7 +28,7 @@\n - local: usage_guides/tracking\n title: Using experiment trackers\n - local: usage_guides/big_modeling\n- title: How to use large models with small resources\n+ title: How perform inference on large models with small resources\n - local: usage_guides/memory\n title: How to avoid CUDA Out-of-Memory\n - local: usage_guides/sagemaker\ndiff --git a/docs/source/usage_guides/big_modeling.mdx b/docs/source/usage_guides/big_modeling.mdx\nindex 1e13849c6..d7523a03b 100644\n--- a/docs/source/usage_guides/big_modeling.mdx\n+++ b/docs/source/usage_guides/big_modeling.mdx\n@@ -10,7 +10,7 @@ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o\n specific language governing permissions and limitations under the License.\n -->\n \n-# Handling big models\n+# Handling big models for inference\n \n When loading a pretrained model in PyTorch, the usual workflow looks like this:\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/921", "pr_id": 1165154155 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex ff3e95ead..2774b18ce 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -85,7 +85,11 @@ def dtype_byte_size(dtype: torch.dtype):\n \n \n def set_module_tensor_to_device(\n- module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+ module: nn.Module,\n+ tensor_name: str,\n+ device: Union[int, str, torch.device],\n+ value: Optional[torch.Tensor] = None,\n+ dtype: Optional[Union[str, torch.dtype]] = None,\n ):\n \"\"\"\n A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n@@ -97,6 +101,9 @@ def set_module_tensor_to_device(\n device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n other device).\n+ dtype (`torch.dtype`, *optional*):\n+ If passed along the value of the parameter will be cast to this `dtype`. 
Otherwise, `value` will be cast to\n+ the dtype of the existing parameter in the model.\n \"\"\"\n # Recurse if needed\n if \".\" in tensor_name:\n@@ -116,6 +123,13 @@ def set_module_tensor_to_device(\n if old_value.device == torch.device(\"meta\") and device not in [\"meta\", torch.device(\"meta\")] and value is None:\n raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n \n+ if value is not None:\n+ if dtype is None:\n+ # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model\n+ value = value.to(old_value.dtype)\n+ elif str(value.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):\n+ value = value.to(dtype)\n+\n with torch.no_grad():\n if value is None:\n new_value = old_value.to(device)\n@@ -680,8 +694,7 @@ def load_checkpoint_in_model(\n else:\n for param_name, param in checkpoint.items():\n module_name = param_name\n- if dtype is not None and not str(param.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):\n- param = param.to(dtype)\n+\n while len(module_name) > 0 and module_name not in device_map:\n module_name = \".\".join(module_name.split(\".\")[:-1])\n if module_name == \"\" and \"\" not in device_map:\n@@ -696,7 +709,7 @@ def load_checkpoint_in_model(\n set_module_tensor_to_device(model, param_name, \"meta\")\n offload_weight(param, param_name, state_dict_folder, index=state_dict_index)\n else:\n- set_module_tensor_to_device(model, param_name, param_device, value=param)\n+ set_module_tensor_to_device(model, param_name, param_device, value=param, dtype=dtype)\n \n # Force Python to clean up.\n del checkpoint\n", "code_comments": [ { "body": "should this not be better done in ` set_module_tensor_to_device` ? Or maybe additionally add a `torch_dtype` arg to `set_module_tensor_to_device` that handles the param correctly if `value=param` is used? ", "diff_hunk": "@@ -680,8 +680,23 @@ def load_checkpoint_in_model(\n else:\n for param_name, param in checkpoint.items():\n module_name = param_name\n+\n if dtype is not None and not str(param.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):\n param = param.to(dtype)\n+\n+ # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in\n+ # model\n+ if dtype is None:\n+ old_param = model\n+ splits = param_name.split(\".\")\n+ for split in splits:\n+ old_param = getattr(old_param, split)\n+ if old_param is None:\n+ break\n+\n+ if old_param is not None:\n+ param = param.to(old_param.dtype)", "from_author": false }, { "body": "This is moved to `set_module_tensor_to_device`.", "diff_hunk": "@@ -680,8 +694,7 @@ def load_checkpoint_in_model(\n else:\n for param_name, param in checkpoint.items():\n module_name = param_name\n- if dtype is not None and not str(param.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Actually before merging, could it maybe be better to handle this in `set_module_tensor_to_device` ? E.g. add a `dtype` argument to the function there? 
This would be easier for `diffusers` to by in line with `accelerate` I think - see: https://github.com/huggingface/diffusers/blob/727434c206f6c22b746e460293035a1324f0bc13/src/diffusers/modeling_utils.py#L491", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/920", "pr_id": 1163744045 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 1c3782067..7aaee2de7 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -196,7 +196,6 @@ def __init__(\n self,\n device_placement: bool = True,\n split_batches: bool = False,\n- fp16: bool = None,\n mixed_precision: Union[PrecisionType, str] = None,\n gradient_accumulation_steps: int = 1,\n cpu: bool = False,\n@@ -220,13 +219,6 @@ def __init__(\n f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}\"\n )\n \n- if fp16:\n- warnings.warn(\n- \"`fp16=True` is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use `mixed_precision='fp16'` instead.\",\n- FutureWarning,\n- )\n- mixed_precision = \"fp16\"\n-\n if dynamo_backend is not None:\n dynamo_backend = DynamoBackend(dynamo_backend.upper())\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex fd5ad7d60..afb6498f3 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -20,7 +20,6 @@\n import os\n import subprocess\n import sys\n-import warnings\n from ast import literal_eval\n from pathlib import Path\n from typing import Dict, List\n@@ -64,7 +63,6 @@\n \"--multi-gpu\": \"Distributed GPUs\",\n \"--tpu\": \"TPU\",\n \"--mps\": \"MPS\",\n- \"--use_mps_device\": \"MPS\",\n \"--use_deepspeed\": \"DeepSpeed Arguments\",\n \"--use_fsdp\": \"FSDP Arguments\",\n \"--use_megatron_lm\": \"Megatron-LM Arguments\",\n@@ -165,12 +163,6 @@ def launch_command_parser(subparsers=None):\n hardware_args.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n- hardware_args.add_argument(\n- \"--use_mps_device\",\n- default=False,\n- action=\"store_true\",\n- help=\"This argument is deprecated, use `--mps` instead.\",\n- )\n \n # Resource selection arguments\n resource_args = parser.add_argument_group(\n@@ -191,12 +183,6 @@ def launch_command_parser(subparsers=None):\n \"Choose between FP16 and BF16 (bfloat16) training. \"\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n )\n- resource_args.add_argument(\n- \"--fp16\",\n- default=False,\n- action=\"store_true\",\n- help=\"This argument is deprecated, use `--mixed_precision fp16` instead.\",\n- )\n resource_args.add_argument(\n \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\n )\n@@ -525,12 +511,6 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"ACCELERATE_USE_CPU\"] = str(args.cpu or args.use_cpu)\n- if args.use_mps_device:\n- warnings.warn(\n- '`use_mps_device` flag is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use \"--mps\" instead.',\n- FutureWarning,\n- )\n- args.mps = True\n current_env[\"ACCELERATE_USE_MPS_DEVICE\"] = str(args.mps)\n if args.mps:\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n@@ -550,13 +530,6 @@ def simple_launcher(args):\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. 
Choose between {PrecisionType.list()}.\"\n )\n \n- if args.fp16:\n- warnings.warn(\n- \"`fp16` is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use `mixed_precision fp16` instead.\",\n- FutureWarning,\n- )\n- mixed_precision = \"fp16\"\n-\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n \n try:\n@@ -614,13 +587,6 @@ def multi_gpu_launcher(args):\n except ValueError:\n raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.\")\n \n- if args.fp16:\n- warnings.warn(\n- \"`fp16` is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use `mixed_precision fp16` instead.\",\n- FutureWarning,\n- )\n- mixed_precision = \"fp16\"\n-\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n \n try:\n@@ -749,13 +715,6 @@ def deepspeed_launcher(args):\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n )\n \n- if args.fp16:\n- warnings.warn(\n- '--fp16 flag is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use \"--mixed_precision fp16\" instead.',\n- FutureWarning,\n- )\n- mixed_precision = \"fp16\"\n-\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\n current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\n@@ -925,10 +884,6 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n )\n \n- if args.fp16:\n- warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', FutureWarning)\n- mixed_precision = \"fp16\"\n-\n try:\n dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\n except ValueError:\n@@ -1054,15 +1009,12 @@ def launch_command(args):\n \n # Those args are handled separately\n if (\n- name not in [\"compute_environment\", \"fp16\", \"mixed_precision\", \"distributed_type\"]\n+ name not in [\"compute_environment\", \"mixed_precision\", \"distributed_type\"]\n and getattr(args, name, None) is None\n ):\n setattr(args, name, attr)\n if not args.mixed_precision:\n- if args.fp16:\n- args.mixed_precision = \"fp16\"\n- else:\n- args.mixed_precision = defaults.mixed_precision\n+ args.mixed_precision = defaults.mixed_precision\n if args.dynamo_backend is None:\n warned.append(\"\\t`--dynamo_backend` was set to a value of `'no'`\")\n args.dynamo_backend = \"no\"\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex 5768ec06b..2c9db6a75 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -15,7 +15,6 @@\n import os\n import sys\n import tempfile\n-import warnings\n \n import torch\n \n@@ -23,7 +22,7 @@\n from .utils import PrecisionType, PrepareForLaunch, patch_environment\n \n \n-def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\n+def notebook_launcher(function, args=(), num_processes=None, mixed_precision=\"no\", use_port=\"29500\"):\n \"\"\"\n Launches a training function, using several processes if it's possible in the current environment (TPU with\n multiple cores for instance).\n@@ -104,13 +103,6 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n \"function.\"\n )\n \n- if use_fp16:\n- warnings.warn(\n- \"`fp16=True` is deprecated and will be removed in version 0.15.0 of πŸ€— 
Accelerate. Use `mixed_precision='fp16'` instead.\",\n- FutureWarning,\n- )\n- mixed_precision = \"fp16\"\n-\n # torch.distributed will expect a few environment variable to be here. We set the ones common to each\n # process here (the other ones will be set be the launcher).\n with patch_environment(\ndiff --git a/src/accelerate/test_utils/scripts/test_script.py b/src/accelerate/test_utils/scripts/test_script.py\nindex 6897d9084..ddb053a8a 100644\n--- a/src/accelerate/test_utils/scripts/test_script.py\n+++ b/src/accelerate/test_utils/scripts/test_script.py\n@@ -275,29 +275,6 @@ def training_check():\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n- # TEST that previous fp16 flag still works\n- print(\"Legacy FP16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(fp16=True)\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n-\n # BF16 support is only for CPU + TPU, and some GPU\n if is_bf16_available():\n # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/917", "pr_id": 1161586415 }, { "diff": "diff --git a/docs/source/package_reference/utilities.mdx b/docs/source/package_reference/utilities.mdx\nindex 3c1a1065e..9ebb60d34 100644\n--- a/docs/source/package_reference/utilities.mdx\n+++ b/docs/source/package_reference/utilities.mdx\n@@ -24,6 +24,8 @@ These are basic dataclasses used throughout πŸ€— Accelerate and they can be pass\n \n [[autodoc]] utils.PrecisionType\n \n+[[autodoc]] utils.ProjectConfiguration\n+\n ## Data Manipulation and Operations\n \n These include data operations that mimic the same `torch` ops but can be used on distributed processes.\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex 6d92e7958..6ea99f9bd 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -370,7 +370,11 @@ Note that since all the model parameters are references to tensors, this will lo\n ## Saving/loading entire states\n \n When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially LR schedulers to be restored in the _same script_.\n-You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so, just by simply passing in a save location. 
\n+You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.\n+\n+To further customize where and how states saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example \n+if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.\n+\n If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded.\n \n <Tip>\ndiff --git a/docs/source/usage_guides/checkpoint.mdx b/docs/source/usage_guides/checkpoint.mdx\nindex f2684971b..354549205 100644\n--- a/docs/source/usage_guides/checkpoint.mdx\n+++ b/docs/source/usage_guides/checkpoint.mdx\n@@ -17,6 +17,9 @@ saving and loading the model, optimizer, RNG generators, and the GradScaler. Ins\n - Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location\n - Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`\n \n+To further customize where and how states saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example \n+if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.\n+\n It should be noted that the expectation is that those states come from the same training script, they should not be from two separate scripts.\n \n - By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions,\n@@ -28,7 +31,7 @@ Below is a brief example using checkpointing to save and reload a state during t\n from accelerate import Accelerator\n import torch\n \n-accelerator = Accelerator()\n+accelerator = Accelerator(project_dir=\"my/save/path\")\n \n my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)\n my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)\n@@ -37,7 +40,7 @@ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, m\n accelerator.register_for_checkpointing(my_scheduler)\n \n # Save the starting state\n-accelerator.save_state(\"my/save/path\")\n+accelerator.save_state()\n \n device = accelerator.device\n my_model.to(device)\n@@ -56,5 +59,5 @@ for epoch in range(num_epochs):\n my_scheduler.step()\n \n # Restore previous state\n-accelerator.load_state(\"my/save/path\")\n+accelerator.load_state(\"my/save/path/checkpointing/checkpoint_0\")\n ```\ndiff --git a/docs/source/usage_guides/tracking.mdx b/docs/source/usage_guides/tracking.mdx\nindex b22a4dd14..3ff40a4a4 100644\n--- a/docs/source/usage_guides/tracking.mdx\n+++ b/docs/source/usage_guides/tracking.mdx\n@@ -83,6 +83,12 @@ for iteration in config[\"num_iterations\"]:\n accelerator.end_training()\n ```\n \n+If a tracker requires a directory to save data to such as `TensorBoard` then a `logging_dir` or `project_dir` can be passed in. 
`project_dir` is useful \n+if there are other further configurations such as those which can be combined with the [`~utils.ProjectConfiguration`] dataclass.\n+\n+```python\n+accelerator = Accelerator(log_with=\"tensorboard\", logging_dir=\".\")\n+```\n \n ## Implementing Custom Trackers\n \ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 1c3782067..151d1db35 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -16,6 +16,7 @@\n import gc\n import math\n import os\n+import shutil\n import sys\n import warnings\n from contextlib import contextmanager\n@@ -44,6 +45,7 @@\n LoggerType,\n MegatronLMPlugin,\n PrecisionType,\n+ ProjectConfiguration,\n RNGType,\n compare_versions,\n convert_outputs_to_fp32,\n@@ -157,8 +159,11 @@ class Accelerator:\n - `\"comet_ml\"`\n If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n- logging_dir (`str`, `os.PathLike`, *optional*):\n- A path to a directory for storing logs of locally-compatible loggers.\n+ project_config (`ProjectConfiguration`, *optional*):\n+ A configuration for how saving the state can be handled.\n+ project_dir (`str`, `os.PathLike`, *optional*):\n+ A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved\n+ checkpoints.\n dispatch_batches (`bool`, *optional*):\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\n and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose\n@@ -205,6 +210,8 @@ def __init__(\n megatron_lm_plugin: MegatronLMPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n+ project_dir: Optional[Union[str, os.PathLike]] = None,\n+ project_config: Optional[ProjectConfiguration] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = None,\n dispatch_batches: Optional[bool] = None,\n even_batches: bool = True,\n@@ -212,7 +219,19 @@ def __init__(\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n dynamo_backend: Union[DynamoBackend, str] = None,\n ):\n- self.logging_dir = logging_dir\n+ if project_config is not None:\n+ self.project_configuration = project_config\n+ else:\n+ self.project_configuration = ProjectConfiguration(project_dir=project_dir)\n+\n+ if logging_dir is not None:\n+ warnings.warn(\n+ \"`logging_dir` is deprecated and will be removed in version 0.18.0 of πŸ€— Accelerate. 
Use `project_dir` instead.\",\n+ FutureWarning,\n+ )\n+ self.project_configuration.logging_dir = logging_dir\n+ if project_dir is not None and self.project_dir is None:\n+ self.project_configuration.project_dir = project_dir\n if mixed_precision is not None:\n mixed_precision = str(mixed_precision)\n if mixed_precision not in PrecisionType:\n@@ -420,6 +439,18 @@ def local_process_index(self):\n def device(self):\n return self.state.device\n \n+ @property\n+ def project_dir(self):\n+ return self.project_configuration.project_dir\n+\n+ @property\n+ def logging_dir(self):\n+ return self.project_configuration.logging_dir\n+\n+ @property\n+ def save_iteration(self):\n+ return self.project_configuration.iteration\n+\n @property\n def is_main_process(self):\n \"\"\"True for one process only.\"\"\"\n@@ -1596,9 +1627,16 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n- def save_state(self, output_dir: str):\n+ def save_state(self, output_dir: str = None):\n \"\"\"\n- Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects.\n+ Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.\n+\n+ If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled\n+ then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater\n+ than `total_limit` then the oldest save is deleted. Each checkpoint is saved in seperate folders named\n+ `checkpoint_<iteration>`.\n+\n+ Otherwise they are just saved to `output_dir`.\n \n <Tip>\n \n@@ -1611,8 +1649,25 @@ def save_state(self, output_dir: str):\n output_dir (`str` or `os.PathLike`):\n The name of the folder to save all relevant weights and states.\n \"\"\"\n- # Check if folder exists\n- output_dir = os.path.expanduser(output_dir)\n+ if self.project_configuration.automatic_checkpoint_naming:\n+ output_dir = os.path.join(self.project_dir, \"checkpoints\")\n+ os.makedirs(output_dir, exist_ok=True)\n+ if self.project_configuration.automatic_checkpoint_naming:\n+ folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)]\n+ if self.project_configuration.total_limit is not None and (\n+ len(folders) + 1 > self.project_configuration.total_limit\n+ ):\n+ folders.sort()\n+ logger.warning(\n+ f\"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint.\"\n+ )\n+ for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]:\n+ shutil.rmtree(folder)\n+ output_dir = os.path.join(output_dir, f\"checkpoint_{self.save_iteration}\")\n+ if os.path.exists(output_dir):\n+ raise ValueError(\n+ f\"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. 
Please manually override `self.save_iteration` with what iteration to start with.\"\n+ )\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving current state to {output_dir}\")\n \n@@ -1660,6 +1715,7 @@ def save_state(self, output_dir: str):\n )\n for i, obj in enumerate(self._custom_objects):\n save_custom_state(obj, output_dir, i)\n+ self.project_configuration.iteration += 1\n return save_location\n \n def load_state(self, input_dir: str):\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 75694ba72..7afebea48 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -147,7 +147,7 @@ class TensorBoardTracker(GeneralTracker):\n name = \"tensorboard\"\n requires_logging_directory = True\n \n- def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]], **kwargs):\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = None, **kwargs):\n self.run_name = run_name\n self.logging_dir = os.path.join(logging_dir, run_name)\n self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)\n@@ -451,7 +451,6 @@ def __init__(\n run_name: Optional[str] = None,\n description: Optional[str] = None,\n ):\n-\n experiment_name = os.getenv(\"MLFLOW_EXPERIMENT_NAME\", experiment_name)\n run_id = os.getenv(\"MLFLOW_RUN_ID\", run_id)\n tags = os.getenv(\"MLFLOW_TAGS\", tags)\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex be78d0089..d3fb916e8 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -16,6 +16,7 @@\n LoggerType,\n MegatronLMPlugin,\n PrecisionType,\n+ ProjectConfiguration,\n RNGType,\n SageMakerDistributedType,\n TensorInformation,\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex eafad8a09..dc8883425 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -313,6 +313,39 @@ class TensorInformation:\n dtype: torch.dtype\n \n \n+@dataclass\n+class ProjectConfiguration:\n+ \"\"\"\n+ Configuration for the Accelerator object based on inner-project needs.\n+ \"\"\"\n+\n+ project_dir: str = field(default=None, metadata={\"help\": \"A path to a directory for storing data.\"})\n+ logging_dir: str = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"A path to a directory for storing logs of locally-compatible loggers. 
If None, defaults to `project_dir`.\"\n+ },\n+ )\n+ automatic_checkpoint_naming: bool = field(\n+ default=False,\n+ metadata={\"help\": \"Whether saved states should be automatically iteratively named.\"},\n+ )\n+\n+ total_limit: int = field(\n+ default=None,\n+ metadata={\"help\": \"The maximum number of total saved states to keep.\"},\n+ )\n+\n+ iteration: int = field(\n+ default=0,\n+ metadata={\"help\": \"The current save iteration.\"},\n+ )\n+\n+ def __post_init__(self):\n+ if self.logging_dir is None:\n+ self.logging_dir = self.project_dir\n+\n+\n @dataclass\n class DeepSpeedPlugin:\n \"\"\"\ndiff --git a/tests/test_state_checkpointing.py b/tests/test_state_checkpointing.py\nindex 87b2d3b06..dd2f6a98b 100644\n--- a/tests/test_state_checkpointing.py\n+++ b/tests/test_state_checkpointing.py\n@@ -23,7 +23,7 @@\n from torch.utils.data import DataLoader, TensorDataset\n \n from accelerate import Accelerator\n-from accelerate.utils import set_seed\n+from accelerate.utils import ProjectConfiguration, set_seed\n \n \n logger = logging.getLogger(__name__)\n@@ -75,7 +75,26 @@ def forward(self, x):\n \n \n class CheckpointTest(unittest.TestCase):\n- def test_can_resume_training(self):\n+ def test_with_save_limit(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)\n+ # Train baseline\n+ accelerator = Accelerator(project_config=project_config)\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ accelerator.save_state()\n+\n+ # Save second state\n+ accelerator.save_state()\n+ self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)\n+\n+ def test_can_resume_training_with_folder(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n set_seed(42)\n model = DummyModel()\n@@ -126,6 +145,58 @@ def test_can_resume_training(self):\n self.assertEqual(opt_state1, opt_state3)\n self.assertEqual(ground_truth_rands, test_rands)\n \n+ def test_can_resume_training(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ project_config = ProjectConfiguration(automatic_checkpoint_naming=True)\n+\n+ # Train baseline\n+ accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ accelerator.save_state()\n+ (a, b) = model.a.item(), model.b.item()\n+ opt_state = optimizer.state_dict()\n+ ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)\n+ (a1, b1) = model.a.item(), model.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)\n+ accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ 
model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ accelerator.load_state(os.path.join(tmpdir, \"checkpoints\", \"checkpoint_0\"))\n+ (a2, b2) = model.a.item(), model.b.item()\n+ opt_state2 = optimizer.state_dict()\n+ self.assertEqual(a, a2)\n+ self.assertEqual(b, b2)\n+ self.assertEqual(opt_state, opt_state2)\n+\n+ test_rands = train(2, model, train_dataloader, optimizer, accelerator)\n+ # Save everything\n+ accelerator.save_state()\n+\n+ # Load everything back in and make sure all states work\n+ accelerator.load_state(os.path.join(tmpdir, \"checkpoints\", \"checkpoint_1\"))\n+ test_rands += train(1, model, train_dataloader, optimizer, accelerator)\n+ (a3, b3) = model.a.item(), model.b.item()\n+ opt_state3 = optimizer.state_dict()\n+ self.assertEqual(a1, a3)\n+ self.assertEqual(b1, b3)\n+ self.assertEqual(opt_state1, opt_state3)\n+ self.assertEqual(ground_truth_rands, test_rands)\n+\n def test_invalid_registration(self):\n t = torch.tensor([1, 2, 3])\n t1 = torch.tensor([2, 3, 4])\n@@ -147,19 +218,18 @@ def test_with_scheduler(self):\n optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)\n train_dataloader, valid_dataloader = dummy_dataloaders()\n+ project_config = ProjectConfiguration(automatic_checkpoint_naming=True)\n # Train baseline\n- accelerator = Accelerator()\n- model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n- model, optimizer, train_dataloader, valid_dataloader\n+ accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)\n+ model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader, scheduler\n )\n- accelerator.register_for_checkpointing(scheduler)\n # Save initial\n- initial = os.path.join(tmpdir, \"initial\")\n- accelerator.save_state(initial)\n+ accelerator.save_state()\n scheduler_state = scheduler.state_dict()\n train(3, model, train_dataloader, optimizer, accelerator, scheduler)\n self.assertNotEqual(scheduler_state, scheduler.state_dict())\n \n # Load everything back in and make sure all states work\n- accelerator.load_state(initial)\n+ accelerator.load_state(os.path.join(tmpdir, \"checkpoints\", \"checkpoint_0\"))\n self.assertEqual(scheduler_state, scheduler.state_dict())\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex d6ff4908a..1aa341f9b 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -70,9 +70,11 @@ def test_log(self):\n log = list(filter(lambda x: x.is_file(), Path(f\"{dirpath}/{project_name}\").iterdir()))[0]\n self.assertNotEqual(str(log), \"\")\n \n- def test_logging_dir(self):\n+ def test_project_dir(self):\n with self.assertRaisesRegex(ValueError, \"Logging with `tensorboard` requires a `logging_dir`\"):\n _ = Accelerator(log_with=\"tensorboard\")\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ _ = Accelerator(log_with=\"tensorboard\", project_dir=dirpath)\n with tempfile.TemporaryDirectory() as dirpath:\n _ = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n \n", "code_comments": [ { "body": "This is starting to make too many arguments.", "diff_hunk": "@@ -158,7 +159,16 @@ class Accelerator:\n If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. 
Can\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\n- A path to a directory for storing logs of locally-compatible loggers.\n+ A path to a directory for storing logs of locally-compatible loggers. If not passed will save in\n+ `project_dir` by default.\n+ project_dir (`str`, `os.PathLike`, *optional*):\n+ A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved\n+ checkpoints.\n+ automatic_checkpoint_naming (`bool`, *optional*, defaults to `False`):\n+ Whether saved states should be stored in `project_location` and be automatically iteratively named.\n+ save_total_limit (`int`, *optional*):\n+ The maximum number of checkpoints to keep if performing `automatic_checkpoint_naming`, will default to all\n+ of them.", "from_author": false }, { "body": "Fixed with a `SaveConfiguration` class", "diff_hunk": "@@ -158,7 +159,16 @@ class Accelerator:\n If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\n- A path to a directory for storing logs of locally-compatible loggers.\n+ A path to a directory for storing logs of locally-compatible loggers. If not passed will save in\n+ `project_dir` by default.\n+ project_dir (`str`, `os.PathLike`, *optional*):\n+ A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved\n+ checkpoints.\n+ automatic_checkpoint_naming (`bool`, *optional*, defaults to `False`):\n+ Whether saved states should be stored in `project_location` and be automatically iteratively named.\n+ save_total_limit (`int`, *optional*):\n+ The maximum number of checkpoints to keep if performing `automatic_checkpoint_naming`, will default to all\n+ of them.", "from_author": true }, { "body": "Let's also deprecate logging_dir to include it in the SaveConfig? Wdyt?", "diff_hunk": "@@ -205,13 +213,23 @@ def __init__(\n megatron_lm_plugin: MegatronLMPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n+ project_dir: Optional[Union[str, os.PathLike]] = None,\n+ save_config: Optional[SaveConfiguration] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = None,\n dispatch_batches: Optional[bool] = None,\n even_batches: bool = True,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n dynamo_backend: Union[DynamoBackend, str] = None,\n ):\n+ if save_config is not None:\n+ self.save_configuration = save_config\n+ else:\n+ self.save_configuration = SaveConfiguration()\n+ if project_dir is not None and self.project_dir is None:\n+ self.save_configuration.project_dir = project_dir\n+ if self.project_dir is not None and logging_dir is None:", "from_author": false }, { "body": "I was debating on that, if we did so I think it'd be better to name it `ProjectConfiguration` instead since it's all relative to the total \"project\" at that point. But can make adjustments for it. 
The only \"con\" I see is given the conversation above for the user the code would change, but the end-result would not so I think it'd be fine", "diff_hunk": "@@ -205,13 +213,23 @@ def __init__(\n megatron_lm_plugin: MegatronLMPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n+ project_dir: Optional[Union[str, os.PathLike]] = None,\n+ save_config: Optional[SaveConfiguration] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = None,\n dispatch_batches: Optional[bool] = None,\n even_batches: bool = True,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n dynamo_backend: Union[DynamoBackend, str] = None,\n ):\n+ if save_config is not None:\n+ self.save_configuration = save_config\n+ else:\n+ self.save_configuration = SaveConfiguration()\n+ if project_dir is not None and self.project_dir is None:\n+ self.save_configuration.project_dir = project_dir\n+ if self.project_dir is not None and logging_dir is None:", "from_author": true }, { "body": "```suggestion\r\nif there are other further configurations such as those which can be combined with the [`~utils.ProjectConfiguration`] dataclass.\r\n```", "diff_hunk": "@@ -83,6 +83,12 @@ for iteration in config[\"num_iterations\"]:\n accelerator.end_training()\n ```\n \n+If a tracker requires a directory to save data to such as `TensorBoard` then a `logging_dir` or `project_dir` can be passed in. `project_dir` is useful \n+for if there are other further configurations such as those which can be combined with the [`~utils.ProjectConfiguration`] dataclass.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "In my setup I have a VM for training and a LXC that hosts TensorBoard linked by a NFS share. I set `logging_dir` to the NFS share so the VM pushes its logs directly to TB as they are generated. Since this PR centralizes logging and output I would no longer be able to dump my logs directly to the TB server and instead have to do some complicated rsync watchdog service.\r\n\r\nCan we keep `logging_dir` as an option? Just set it to `project_dir` by default, no need to depreciate it.", "from_author": false }, { "body": "@Cyberes there is a deprecation cycle for `logging_dir` of two full versions and when it comes to trackers `project_dir` acts the exact same as the `logging_dir` parameter, so for your particular setup if I understand correctly would just need to change `logging_dir` to `project_dir` and nothing else. \r\n\r\nIs this reasonable enough for you? Or is there something more specific/drastic I've misunderstood.", "from_author": true }, { "body": "Just to make sure I'm understanding the new functionality, the checkpoints are saved to `project_dir/checkpoints` and the logs are saved to `project_dir/logs/[name]`?", "from_author": false }, { "body": "Checkpoints are saved to `project_dir/checkpoints`, logs are saved to `project_dir/[name]`", "from_author": true }, { "body": "Yeah, I'd like to have the option to put them in completely separate directories. 
For example, save the checkpoint states to `project_dir/checkpoints` and TB logs to `/mnt/tensorboard`.\r\n\r\nIf you're against the seperate directories thing I could always override `TensorBoardTracker`, I guess.", "from_author": false }, { "body": "That use case makes sense, will do as you suggested and override only if not passed. Thanks @Cyberes :) ", "from_author": true }, { "body": "Sweeeeeeeeeet, thanks @muellerzr.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/916", "pr_id": 1161333599 }, { "diff": "diff --git a/docs/source/usage_guides/checkpoint.mdx b/docs/source/usage_guides/checkpoint.mdx\nindex 7d6bbbf99..f2684971b 100644\n--- a/docs/source/usage_guides/checkpoint.mdx\n+++ b/docs/source/usage_guides/checkpoint.mdx\n@@ -31,13 +31,13 @@ import torch\n accelerator = Accelerator()\n \n my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)\n-my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n+my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)\n \n # Register the LR scheduler\n-accelerate.register_for_checkpointing(my_scheduler)\n+accelerator.register_for_checkpointing(my_scheduler)\n \n # Save the starting state\n-accelerate.save_state(\"my/save/path\")\n+accelerator.save_state(\"my/save/path\")\n \n device = accelerator.device\n my_model.to(device)\n@@ -56,5 +56,5 @@ for epoch in range(num_epochs):\n my_scheduler.step()\n \n # Restore previous state\n-accelerate.load_state(\"my/save/path\")\n+accelerator.load_state(\"my/save/path\")\n ```\n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_915). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "Thanks for the fixes! πŸ€—", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/915", "pr_id": 1160928325 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 85fc1b4d2..1c3782067 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -939,9 +939,9 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n backward_prefetch=fsdp_plugin.backward_prefetch,\n mixed_precision=fsdp_plugin.mixed_precision_policy,\n ignored_modules=fsdp_plugin.ignored_modules,\n+ device_id=self.device,\n+ limit_all_gathers=fsdp_plugin.limit_all_gathers,\n )\n- if not fsdp_plugin.cpu_offload.offload_params:\n- model.to(self.device)\n self._models[-1] = model\n elif self.distributed_type == DistributedType.MULTI_CPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 2e286b657..eafad8a09 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -554,6 +554,16 @@ class FullyShardedDataParallelPlugin:\n },\n )\n \n+ limit_all_gathers: bool = field(\n+ default=False,\n+ metadata={\n+ \"help\": \"If False, then FSDP allows the CPU thread to schedule all-gathers \"\n+ \"without any extra synchronization. If True, then FSDP explicitly synchronizes the CPU thread to prevent \"\n+ \"too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. 
\"\n+ \"Enabling this can help lower the number of CUDA malloc retries.\"\n+ },\n+ )\n+\n def __post_init__(self):\n from torch.distributed.fsdp.fully_sharded_data_parallel import (\n BackwardPrefetch,\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/911", "pr_id": 1156061410 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 85fc1b4d2..0808ca462 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -349,7 +349,11 @@ def __init__(\n self.scaler = None\n self.native_amp = False\n err = \"{mode} mixed precision requires {requirement}\"\n- if self.state.mixed_precision == \"fp16\" and self.distributed_type != DistributedType.MEGATRON_LM:\n+ if (\n+ self.state.mixed_precision == \"fp16\"\n+ and self.device.type != \"cpu\"\n+ and self.distributed_type != DistributedType.MEGATRON_LM\n+ ):\n self.native_amp = True\n if not torch.cuda.is_available() and not parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\"):\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\n@@ -1847,7 +1851,7 @@ def autocast(self):\n if self.native_amp:\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\n- elif self.mixed_precision == \"bf16\" and is_bf16_available():\n+ elif self.mixed_precision == \"bf16\":\n if self.distributed_type in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\n autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=self.device.type)\n else:\n", "code_comments": [], "context": [ { "body": "@muellerzr @sgugger @yao-matrix @jiqing-feng.", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/909", "pr_id": 1155223453 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 12f67088a..97bd75833 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -1006,7 +1006,7 @@ def launch_command(args):\n raise ValueError(\n \"You can only use one of `--cpu`, `--multi_gpu`, `--mps`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time.\"\n )\n- if args.multi_gpu and args.num_processes < 2:\n+ if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):\n raise ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")\n \n defaults = None\n", "code_comments": [ { "body": "Can all be grouped in the same if ;-)", "diff_hunk": "@@ -1006,8 +1006,9 @@ def launch_command(args):\n raise ValueError(\n \"You can only use one of `--cpu`, `--multi_gpu`, `--mps`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time.\"\n )\n- if args.multi_gpu and args.num_processes < 2:\n- raise ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")\n+ if args.num_processes is not None:\n+ if args.multi_gpu and args.num_processes < 2:\n+ raise ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/907", "pr_id": 1151607146 }, { 
"diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 1d6f7ee10..85fc1b4d2 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -365,7 +365,10 @@ def __init__(\n and self.distributed_type != DistributedType.FSDP\n and self.distributed_type != DistributedType.MEGATRON_LM\n ):\n- self.native_amp = is_bf16_available(True)\n+ if self.device.type == \"cpu\":\n+ self.native_amp = is_torch_version(\">=\", \"1.10\")\n+ else:\n+ self.native_amp = is_bf16_available(True)\n if mixed_precision == \"bf16\" and not self.native_amp and not is_tpu_available():\n raise ValueError(err.format(mode=\"bf16\", requirement=\"PyTorch >= 1.10 and a supported device.\"))\n \n@@ -948,8 +951,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\n- device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n- model.forward = torch.autocast(device_type=device_type, dtype=torch.bfloat16)(model.forward)\n+ model.forward = torch.autocast(device_type=self.device.type, dtype=torch.bfloat16)(model.forward)\n else:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n@@ -1847,8 +1849,7 @@ def autocast(self):\n autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\n elif self.mixed_precision == \"bf16\" and is_bf16_available():\n if self.distributed_type in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\n- device_type = \"cpu\" if not torch.cuda.is_available() else \"cuda\"\n- autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=device_type)\n+ autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=self.device.type)\n else:\n autocast_context = torch.cuda.amp.autocast()\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 12f67088a..af3ba0691 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -1089,7 +1089,7 @@ def launch_command(args):\n [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n )\n threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n- if args.num_cpu_threads_per_process > 1:\n+ if threads_per_process > 1:\n args.num_cpu_threads_per_process = threads_per_process\n warned.append(\n f\"\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs\"\n", "code_comments": [ { "body": "This line (and the next occurrence) can be removed entirely and just pass in `self.device.type` to `torch.autocast`", "diff_hunk": "@@ -948,7 +951,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\n- device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n+ device_type = \"cuda\" if self.device.type != \"cpu\" and torch.cuda.is_available() else \"cpu\"", "from_author": false }, { "body": "good point. 
done", "diff_hunk": "@@ -948,7 +951,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\n- device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n+ device_type = \"cuda\" if self.device.type != \"cpu\" and torch.cuda.is_available() else \"cpu\"", "from_author": true }, { "body": "done", "diff_hunk": "@@ -948,7 +951,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\n- device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n+ device_type = \"cuda\" if self.device.type != \"cpu\" and torch.cuda.is_available() else \"cpu\"", "from_author": true } ], "context": [ { "body": "@sgugger @yao-matrix @jiqing-feng please notice the issue", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/906", "pr_id": 1150778099 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 081380331..12f67088a 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -1002,8 +1002,12 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \n def launch_command(args):\n # Sanity checks\n- if sum([args.multi_gpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:\n- raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`, `--use_fsdp`.\")\n+ if sum([args.multi_gpu, args.cpu, args.tpu, args.mps, args.use_deepspeed, args.use_fsdp]) > 1:\n+ raise ValueError(\n+ \"You can only use one of `--cpu`, `--multi_gpu`, `--mps`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time.\"\n+ )\n+ if args.multi_gpu and args.num_processes < 2:\n+ raise ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")\n \n defaults = None\n warned = []\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/905", "pr_id": 1149883513 }, { "diff": "diff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 490ea3f4f..2e286b657 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -65,7 +65,18 @@ class DistributedDataParallelKwargs(KwargsHandler):\n \n `static_graph` is only available in PyTorch 1.11.0 and later versions.\n \n- </Tip>\"\"\"\n+ </Tip>\n+\n+ Example:\n+\n+ ```python\n+ from accelerate import Accelerator\n+ from accelerate.utils import DistributedDataParallelKwargs\n+\n+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)\n+ accelerator = Accelerator(kwargs_handlers=[kwargs])\n+ ```\n+ \"\"\"\n \n dim: int = 0\n broadcast_buffers: bool = True\n@@ -87,7 +98,18 @@ class GradScalerKwargs(KwargsHandler):\n \n `GradScaler` is only available in PyTorch 1.5.0 and later versions.\n \n- </Tip>\"\"\"\n+ </Tip>\n+\n+ Example:\n+\n+ ```python\n+ from 
accelerate import Accelerator\n+ from accelerate.utils import GradScalerKwargs\n+\n+ kwargs = GradScalerKwargs(backoff_filter=0.25)\n+ accelerator = Accelerator(kwargs_handlers=[kwargs])\n+ ```\n+ \"\"\"\n \n init_scale: float = 65536.0\n growth_factor: float = 2.0\n@@ -103,6 +125,15 @@ class InitProcessGroupKwargs(KwargsHandler):\n to the documentation of this\n [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more\n information on each argument.\n+\n+ ```python\n+ from datetime import timedelta\n+ from accelerate import Accelerator\n+ from accelerate.utils import InitProcessGroupKwargs\n+\n+ kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))\n+ accelerator = Accelerator(kwargs_handlers=[kwargs])\n+ ```\n \"\"\"\n \n init_method: Optional[str] = None\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/904", "pr_id": 1149817474 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex 3d72aada6..402d57781 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -125,6 +125,8 @@ accelerate launch [arguments] {training_script} --{training_script-argument-1} -\n * `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.\n * `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.\n * `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails.\n+* `-q`, `--quiet` (`bool`) -- Silence subprocess errors from the launch stack trace to only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations).\n+\n \n The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their \n values. They can also be passed in manually.\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex d778177c6..081380331 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -137,6 +137,12 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--config_file\", default=None, help=\"The config file to use for the default values in the launching script.\"\n )\n+ parser.add_argument(\n+ \"--quiet\",\n+ \"-q\",\n+ action=\"store_true\",\n+ help=\"Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. 
(Only applicable to DeepSpeed and single-process configurations)\",\n+ )\n # Hardware selection arguments\n hardware_args = parser.add_argument_group(\n \"Hardware Selection Arguments\", \"Arguments for selecting the hardware to be used.\"\n@@ -564,7 +570,10 @@ def simple_launcher(args):\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n if process.returncode != 0:\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ if not args.quiet:\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ else:\n+ sys.exit(1)\n \n \n def multi_gpu_launcher(args):\n@@ -770,7 +779,10 @@ def deepspeed_launcher(args):\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n if process.returncode != 0:\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ if not args.quiet:\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ else:\n+ sys.exit(1)\n else:\n if is_torch_version(\"<\", \"1.9.0\"):\n raise NotImplementedError(\"Multi-node training requires pytorch>=1.9.0\")\n", "code_comments": [ { "body": "```suggestion\r\n help=\"Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)\",\r\n```", "diff_hunk": "@@ -137,6 +137,12 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--config_file\", default=None, help=\"The config file to use for the default values in the launching script.\"\n )\n+ parser.add_argument(\n+ \"--quiet\",\n+ \"-q\",\n+ action=\"store_true\",\n+ help=\"Don't print an error message if an error return code is returned from launch.\",", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "I followed the instructions in https://github.com/huggingface/accelerate/pull/895#issuecomment-1333213726\r\n```\r\nblack tests src examples benchmarks\r\nreformatted src/accelerate/commands/launch.py\r\n\r\nAll done! ✨ 🍰 ✨\r\n1 file reformatted, 104 files left unchanged.\r\nisort tests src examples benchmarks\r\npython utils/style_doc.py src/accelerate docs/source --max_len 119\r\nblack --check tests src examples benchmarks\r\nAll done! ✨ 🍰 ✨\r\n105 files would be left unchanged.\r\nisort --check-only tests src examples benchmarks\r\nflake8 tests src examples benchmarks\r\npython utils/style_doc.py src/accelerate docs/source --max_len 119 --check_only\r\n\r\n```", "from_author": true }, { "body": "I'll make those doc changes. Let me know if I should change it to `--silent`/`-s`.\r\nTODO: update docs if it's changed to `--silent`", "from_author": true }, { "body": "No strong opinion on the name on my side.", "from_author": false }, { "body": "We'll keep it as `-q` then, thanks for your contribution @Cyberes! 
:) ", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/902", "pr_id": 1143838386 }, { "diff": "diff --git a/docs/source/package_reference/utilities.mdx b/docs/source/package_reference/utilities.mdx\nindex 0c64953fc..3c1a1065e 100644\n--- a/docs/source/package_reference/utilities.mdx\n+++ b/docs/source/package_reference/utilities.mdx\n@@ -93,3 +93,10 @@ These utilities relate to setting and synchronizing of all the random states.\n [[autodoc]] utils.synchronize_rng_state\n \n [[autodoc]] utils.synchronize_rng_states\n+\n+\n+## PyTorch XLA\n+\n+These include utilities that are useful while using PyTorch with XLA.\n+\n+[[autodoc]] utils.install_xla\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex f46bce267..be78d0089 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -128,4 +128,5 @@\n write_basic_config,\n )\n from .random import set_seed, synchronize_rng_state, synchronize_rng_states\n+from .torch_xla import install_xla\n from .tqdm import tqdm\ndiff --git a/src/accelerate/utils/torch_xla.py b/src/accelerate/utils/torch_xla.py\nnew file mode 100644\nindex 000000000..59fe7970c\n--- /dev/null\n+++ b/src/accelerate/utils/torch_xla.py\n@@ -0,0 +1,44 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import subprocess\n+import sys\n+\n+import pkg_resources\n+\n+\n+def install_xla(upgrade: bool = False):\n+ \"\"\"\n+ Helper function to install appropriate xla wheels based on the `torch` version.\n+\n+ Args:\n+ upgrade (`bool`, *optional*, defaults to `False`):\n+ Whether to upgrade `torch` and install the latest `torch_xla` wheels.\n+ \"\"\"\n+ in_colab = False\n+ if \"IPython\" in sys.modules:\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n+\n+ if in_colab:\n+ if upgrade:\n+ torch_install_cmd = [\"pip\", \"install\", \"-U\", \"torch\"]\n+ subprocess.run(torch_install_cmd, check=True)\n+ # get the current version of torch\n+ torch_version = pkg_resources.get_distribution(\"torch\").version\n+ torch_version_trunc = torch_version[: torch_version.rindex(\".\")]\n+ xla_wheel = f\"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl\"\n+ xla_install_cmd = [\"pip\", \"install\", xla_wheel]\n+ subprocess.run(xla_install_cmd, check=True)\n+ else:\n+ raise RuntimeError(\"`install_xla` utility works only on google colab.\")\n", "code_comments": [ { "body": "Is there a reason to limit this? Everything should work fine in any other env no? Or is it that the wheels can only work on Colab?", "diff_hunk": "@@ -0,0 +1,43 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import sys\n+import subprocess\n+import pkg_resources\n+\n+\n+def install_xla(upgrade: bool = False):\n+ \"\"\"\n+ Helper function to install appropriate xla wheels based on the `torch` version.\n+\n+ Args:\n+ upgrade (`bool`, *optional*, defaults to `False`):\n+ Whether to upgrade `torch` and install the latest `torch_xla` wheels.\n+ \"\"\"\n+ in_colab = False\n+ if \"IPython\" in sys.modules:\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n+\n+ if in_colab:", "from_author": false }, { "body": "The FR was for colab specifically because they have their own wheels for it https://github.com/huggingface/accelerate/issues/586 (otherwise things get more complicated such as trying to get the right python version, etc. Things that can be added later but for rn just an easy way in colab to skip a line :) )", "diff_hunk": "@@ -0,0 +1,43 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import sys\n+import subprocess\n+import pkg_resources\n+\n+\n+def install_xla(upgrade: bool = False):\n+ \"\"\"\n+ Helper function to install appropriate xla wheels based on the `torch` version.\n+\n+ Args:\n+ upgrade (`bool`, *optional*, defaults to `False`):\n+ Whether to upgrade `torch` and install the latest `torch_xla` wheels.\n+ \"\"\"\n+ in_colab = False\n+ if \"IPython\" in sys.modules:\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n+\n+ if in_colab:", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/896", "pr_id": 1140692330 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 4e23c18af..1d6f7ee10 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -237,7 +237,7 @@ def __init__(\n else:\n assert isinstance(\n deepspeed_plugin, DeepSpeedPlugin\n- ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n+ ), \"`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object.\"\n os.environ[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n if deepspeed_plugin:\n if not is_deepspeed_available():\n@@ -285,7 +285,9 @@ def __init__(\n self.init_handler = None\n if kwargs_handlers is not None:\n for handler 
in kwargs_handlers:\n- assert isinstance(handler, KwargsHandler), f\"Unsupported kwargs handler passed: {handler}.\"\n+ assert isinstance(\n+ handler, KwargsHandler\n+ ), f\"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`.\"\n if isinstance(handler, DistributedDataParallelKwargs):\n if self.ddp_handler is not None:\n raise ValueError(\"You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.\")\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/894", "pr_id": 1139261065 }, { "diff": "diff --git a/src/accelerate/commands/menu/keymap.py b/src/accelerate/commands/menu/keymap.py\nindex 7ce6c0637..3f08236b3 100644\n--- a/src/accelerate/commands/menu/keymap.py\n+++ b/src/accelerate/commands/menu/keymap.py\n@@ -17,10 +17,9 @@\n \"\"\"\n \n \n+import os\n import string\n import sys\n-import termios\n-import tty\n \n \n ARROW_KEY_FLAG = 1 << 8\n@@ -36,24 +35,77 @@\n \"mod_int\": 91,\n \"undefined\": sys.maxsize,\n \"interrupt\": 3,\n+ \"insert\": 50,\n+ \"delete\": 51,\n+ \"pg_up\": 53,\n+ \"pg_down\": 54,\n }\n \n KEYMAP[\"arrow_begin\"] = KEYMAP[\"up\"]\n KEYMAP[\"arrow_end\"] = KEYMAP[\"left\"]\n \n+if sys.platform == \"win32\":\n+ WIN_CH_BUFFER = []\n+ WIN_KEYMAP = {\n+ b\"\\xe0H\": KEYMAP[\"up\"] - ARROW_KEY_FLAG,\n+ b\"\\x00H\": KEYMAP[\"up\"] - ARROW_KEY_FLAG,\n+ b\"\\xe0P\": KEYMAP[\"down\"] - ARROW_KEY_FLAG,\n+ b\"\\x00P\": KEYMAP[\"down\"] - ARROW_KEY_FLAG,\n+ b\"\\xe0M\": KEYMAP[\"right\"] - ARROW_KEY_FLAG,\n+ b\"\\x00M\": KEYMAP[\"right\"] - ARROW_KEY_FLAG,\n+ b\"\\xe0K\": KEYMAP[\"left\"] - ARROW_KEY_FLAG,\n+ b\"\\x00K\": KEYMAP[\"left\"] - ARROW_KEY_FLAG,\n+ }\n+\n for i in range(10):\n KEYMAP[str(i)] = ord(str(i))\n \n \n def get_raw_chars():\n \"Gets raw characters from inputs\"\n- fd = sys.stdin.fileno()\n- old_settings = termios.tcgetattr(fd)\n- try:\n- tty.setraw(fd)\n- ch = sys.stdin.read(1)\n- finally:\n- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n+ if os.name == \"nt\":\n+ import msvcrt\n+\n+ encoding = \"mbcs\"\n+ # Flush the keyboard buffer\n+ while msvcrt.kbhit():\n+ msvcrt.getwch()\n+ if len(WIN_CH_BUFFER) == 0:\n+ # Read the keystroke\n+ ch = msvcrt.getwch()\n+ # If it is a prefix char, get second part\n+ if ch.encode(encoding) in (b\"\\x00\", b\"\\xe0\"):\n+ ch2 = ch + msvcrt.getwch()\n+ # Translate actual Win chars to bullet char types\n+ try:\n+ chx = chr(WIN_KEYMAP[ch2.encode(encoding)])\n+ WIN_CH_BUFFER.append(chr(KEYMAP[\"mod_int\"]))\n+ WIN_CH_BUFFER.append(chx)\n+ if ord(chx) in (\n+ KEYMAP[\"insert\"] - 1 << 9,\n+ KEYMAP[\"delete\"] - 1 << 9,\n+ KEYMAP[\"pg_up\"] - 1 << 9,\n+ KEYMAP[\"pg_down\"] - 1 << 9,\n+ ):\n+ WIN_CH_BUFFER.append(chr(126))\n+ ch = chr(KEYMAP[\"esc\"])\n+ except KeyError:\n+ ch = ch2[1]\n+ else:\n+ pass\n+ else:\n+ ch = WIN_CH_BUFFER.pop(0)\n+ elif os.name == \"posix\":\n+ import termios\n+ import tty\n+\n+ fd = sys.stdin.fileno()\n+ old_settings = termios.tcgetattr(fd)\n+ try:\n+ tty.setraw(fd)\n+ ch = sys.stdin.read(1)\n+ finally:\n+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n \n \ndiff --git a/src/accelerate/commands/menu/selection_menu.py b/src/accelerate/commands/menu/selection_menu.py\nindex 751f6818a..d5a1c8e35 100644\n--- a/src/accelerate/commands/menu/selection_menu.py\n+++ b/src/accelerate/commands/menu/selection_menu.py\n@@ -15,6 +15,8 @@\n \"\"\"\n Main 
driver for the selection menu, based on https://github.com/bchao1/bullet\n \"\"\"\n+import sys\n+\n from . import cursor, input\n from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor\n from .keymap import KEYMAP\n@@ -30,12 +32,22 @@ def __init__(self, prompt: str = None, choices: list = []):\n self.position = 0\n self.choices = choices\n self.prompt = prompt\n+ if sys.platform == \"win32\":\n+ self.arrow_char = \"*\"\n+ else:\n+ self.arrow_char = \"βž” \"\n+\n+ def write_choice(self, index, end: str = \"\"):\n+ if sys.platform != \"win32\":\n+ writeColor(self.choices[index], 32, end)\n+ else:\n+ forceWrite(self.choices[index], end)\n \n def print_choice(self, index: int):\n \"Prints the choice at the given index\"\n if index == self.position:\n- forceWrite(\" βž” \")\n- writeColor(self.choices[index], 32)\n+ forceWrite(f\" {self.arrow_char} \")\n+ self.write_choice(index)\n else:\n forceWrite(f\" {self.choices[index]}\")\n reset_cursor()\n@@ -109,6 +121,5 @@ def run(self, default_choice: int = 0):\n for _ in range(len(self.choices) + 1):\n move_cursor(1, \"UP\")\n clear_line()\n- forceWrite(\" βž” \")\n- writeColor(self.choices[choice], 32, \"\\n\")\n+ self.write_choice(choice, \"\\n\")\n return choice\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/893", "pr_id": 1139142981 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex e471731a4..ff3e95ead 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -21,11 +21,10 @@\n from collections import defaultdict\n from typing import Dict, List, Optional, Tuple, Union\n \n-import numpy as np\n import torch\n import torch.nn as nn\n \n-from .offload import offload_weight, save_offload_index\n+from .offload import load_offloaded_weight, offload_weight, save_offload_index\n \n \n WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n@@ -324,9 +323,8 @@ def load_offloaded_weights(model, index, offload_folder):\n \n for param_name, metadata in index.items():\n tensor_file = os.path.join(offload_folder, f\"{param_name}.dat\")\n- shape = tuple(metadata[\"shape\"])\n- weight = np.memmap(tensor_file, dtype=metadata[\"dtype\"], mode=\"r\", shape=shape)\n- set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\n+ weight = load_offloaded_weight(tensor_file, metadata)\n+ set_module_tensor_to_device(model, param_name, \"cpu\", value=weight)\n \n \n def get_balanced_memory(\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/892", "pr_id": 1139132684 }, { "diff": "diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex 86c9766a1..eca75c1ed 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -119,7 +119,7 @@ def setUp(self):\n self.ds_config_dict = dict(zero2=config_zero2, zero3=config_zero3)\n \n self.dist_env = dict(\n- USE_DEEPSPEED=\"true\",\n+ ACCELERATE_USE_DEEPSPEED=\"true\",\n MASTER_ADDR=\"localhost\",\n MASTER_PORT=\"10999\",\n RANK=\"0\",\ndiff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py\nindex e90106c17..19917c671 100644\n--- a/tests/fsdp/test_fsdp.py\n+++ 
b/tests/fsdp/test_fsdp.py\n@@ -58,7 +58,7 @@ def setUp(self):\n super().setUp()\n \n self.dist_env = dict(\n- USE_FSDP=\"true\",\n+ ACCELERATE_USE_FSDP=\"true\",\n MASTER_ADDR=\"localhost\",\n MASTER_PORT=\"10999\",\n RANK=\"0\",\n@@ -147,7 +147,7 @@ def test_mixed_precision(self):\n \n for mp_dtype in dtypes:\n env = self.dist_env.copy()\n- env[\"MIXED_PRECISION\"] = mp_dtype\n+ env[\"ACCELERATE_MIXED_PRECISION\"] = mp_dtype\n with mockenv_context(**env):\n accelerator = Accelerator()\n if mp_dtype == \"fp16\":\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/891", "pr_id": 1138809005 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 0d0fe0aa6..fabf4991e 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -119,9 +119,9 @@ class Accelerator:\n in your script multiplied by the number of processes.\n mixed_precision (`str`, *optional*):\n Whether or not to use mixed precision training (fp16 or bfloat16). Choose from 'no','fp16','bf16'. Will\n- default to the value in the environment variable `MIXED_PRECISION`, which will use the default value in the\n- accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp16'\n- requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n+ default to the value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default\n+ value in the accelerate config of the current system or the flag passed with the `accelerate.launch`\n+ command. 'fp16' requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n gradient_accumulation_steps (`int`, *optional*, default to 1):\n The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with\n `Accelerator.accumulate`.\n@@ -231,39 +231,49 @@ def __init__(\n dynamo_backend = DynamoBackend(dynamo_backend.upper())\n \n if deepspeed_plugin is None: # init from env variables\n- deepspeed_plugin = DeepSpeedPlugin() if os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" else None\n+ deepspeed_plugin = (\n+ DeepSpeedPlugin() if os.environ.get(\"ACCELERATE_USE_DEEPSPEED\", \"false\") == \"true\" else None\n+ )\n else:\n assert isinstance(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n- os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n+ os.environ[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n if deepspeed_plugin:\n if not is_deepspeed_available():\n raise ImportError(\"DeepSpeed is not installed => run `pip install deepspeed` or build it from source.\")\n if compare_versions(\"deepspeed\", \"<\", \"0.6.5\"):\n raise ImportError(\"DeepSpeed version must be >= 0.6.5. 
Please update DeepSpeed.\")\n \n- mixed_precision = os.environ.get(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n+ mixed_precision = (\n+ os.environ.get(\"ACCELERATE_MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n+ )\n deepspeed_plugin.set_mixed_precision(mixed_precision)\n deepspeed_plugin.set_deepspeed_weakref()\n \n- if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" or isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\n+ if os.environ.get(\"ACCELERATE_USE_FSDP\", \"false\") == \"true\" or isinstance(\n+ fsdp_plugin, FullyShardedDataParallelPlugin\n+ ):\n if is_torch_version(\"<\", \"1.12.0\"):\n raise ValueError(\"FSDP requires PyTorch >= 1.12.0\")\n \n if fsdp_plugin is None: # init from env variables\n- fsdp_plugin = FullyShardedDataParallelPlugin() if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" else None\n+ fsdp_plugin = (\n+ FullyShardedDataParallelPlugin() if os.environ.get(\"ACCELERATE_USE_FSDP\", \"false\") == \"true\" else None\n+ )\n else:\n if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\n raise TypeError(\"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\")\n- os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\n+ os.environ[\"ACCELERATE_USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\n \n if megatron_lm_plugin is None: # init from env variables\n- megatron_lm_plugin = MegatronLMPlugin() if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\" else None\n+ megatron_lm_plugin = (\n+ MegatronLMPlugin() if os.environ.get(\"ACCELERATE_USE_MEGATRON_LM\", \"false\") == \"true\" else None\n+ )\n else:\n if not isinstance(megatron_lm_plugin, MegatronLMPlugin):\n raise TypeError(\"`megatron_lm_plugin` must be a MegatronLMPlugin object.\")\n- os.environ[\"USE_MEGATRON_LM\"] = \"true\" # use MegatronLM if plugin is provided\n+ os.environ[\"ACCELERATE_USE_MEGATRON_LM\"] = \"true\" # use MegatronLM if plugin is provided\n \n if megatron_lm_plugin:\n if not is_megatron_lm_available():\n@@ -339,7 +349,7 @@ def __init__(\n err = \"{mode} mixed precision requires {requirement}\"\n if self.state.mixed_precision == \"fp16\" and self.distributed_type != DistributedType.MEGATRON_LM:\n self.native_amp = True\n- if not torch.cuda.is_available() and not parse_flag_from_env(\"USE_MPS_DEVICE\"):\n+ if not torch.cuda.is_available() and not parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\"):\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n if self.distributed_type == DistributedType.FSDP:\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 9e37bf0db..d778177c6 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -518,14 +518,14 @@ def simple_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\n+ current_env[\"ACCELERATE_USE_CPU\"] = str(args.cpu or args.use_cpu)\n if args.use_mps_device:\n warnings.warn(\n '`use_mps_device` flag is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. 
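The hunk above keeps the old `--use_mps_device` flag working while steering users to `--mps`. A condensed, self-contained sketch of that deprecate-and-forward pattern follows; the parser here is a toy stand-in, not the actual `accelerate launch` argument parser.

```python
import argparse
import warnings

parser = argparse.ArgumentParser()
parser.add_argument("--mps", action="store_true")
parser.add_argument("--use_mps_device", action="store_true")

args = parser.parse_args(["--use_mps_device"])

if args.use_mps_device:
    # The old flag still parses, but warns and is folded into the new flag so
    # the rest of the code only has to check `args.mps`.
    warnings.warn("`--use_mps_device` is deprecated, use `--mps` instead.", FutureWarning)
    args.mps = True

assert args.mps
```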
Use \"--mps\" instead.',\n FutureWarning,\n )\n args.mps = True\n- current_env[\"USE_MPS_DEVICE\"] = str(args.mps)\n+ current_env[\"ACCELERATE_USE_MPS_DEVICE\"] = str(args.mps)\n if args.mps:\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n elif args.gpu_ids != \"all\" and args.gpu_ids is not None:\n@@ -551,13 +551,13 @@ def simple_launcher(args):\n )\n mixed_precision = \"fp16\"\n \n- current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n \n try:\n dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\n except ValueError:\n raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.\")\n- current_env[\"DYNAMO_BACKEND\"] = dynamo_backend.value\n+ current_env[\"ACCELERATE_DYNAMO_BACKEND\"] = dynamo_backend.value\n \n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n \n@@ -612,16 +612,16 @@ def multi_gpu_launcher(args):\n )\n mixed_precision = \"fp16\"\n \n- current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n \n try:\n dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\n except ValueError:\n raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.\")\n- current_env[\"DYNAMO_BACKEND\"] = dynamo_backend.value\n+ current_env[\"ACCELERATE_DYNAMO_BACKEND\"] = dynamo_backend.value\n \n if args.use_fsdp:\n- current_env[\"USE_FSDP\"] = \"true\"\n+ current_env[\"ACCELERATE_USE_FSDP\"] = \"true\"\n current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.fsdp_sharding_strategy)\n current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.fsdp_offload_params).lower()\n current_env[\"FSDP_MIN_NUM_PARAMS\"] = str(args.fsdp_min_num_params)\n@@ -636,7 +636,7 @@ def multi_gpu_launcher(args):\n \n if args.use_megatron_lm:\n prefix = \"MEGATRON_LM_\"\n- current_env[\"USE_MEGATRON_LM\"] = \"true\"\n+ current_env[\"ACCELERATE_USE_MEGATRON_LM\"] = \"true\"\n current_env[prefix + \"TP_DEGREE\"] = str(args.megatron_lm_tp_degree)\n current_env[prefix + \"PP_DEGREE\"] = str(args.megatron_lm_pp_degree)\n current_env[prefix + \"GRADIENT_CLIPPING\"] = str(args.megatron_lm_gradient_clipping)\n@@ -748,8 +748,8 @@ def deepspeed_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\n- current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n- current_env[\"USE_DEEPSPEED\"] = \"true\"\n+ current_env[\"ACCELERATE_MIXED_PRECISION\"] = str(mixed_precision)\n+ current_env[\"ACCELERATE_USE_DEEPSPEED\"] = \"true\"\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n current_env[\"GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\n@@ -924,10 +924,10 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \n # Environment variables to be set for use during training job\n environment = {\n- \"USE_SAGEMAKER\": \"true\",\n- \"MIXED_PRECISION\": str(mixed_precision),\n- \"DYNAMO_BACKEND\": dynamo_backend.value,\n- \"SAGEMAKER_DISTRIBUTED_TYPE\": sagemaker_config.distributed_type.value,\n+ \"ACCELERATE_USE_SAGEMAKER\": \"true\",\n+ \"ACCELERATE_MIXED_PRECISION\": str(mixed_precision),\n+ \"ACCELERATE_DYNAMO_BACKEND\": dynamo_backend.value,\n+ \"ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE\": sagemaker_config.distributed_type.value,\n }\n # configure distribution set 
up\n distribution = None\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex 4de8b5858..5768ec06b 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -163,9 +163,9 @@ def debug_launcher(function, args=(), num_processes=2):\n world_size=num_processes,\n master_addr=\"127.0.01\",\n master_port=\"29500\",\n- mixed_precision=\"no\",\n+ accelerate_mixed_precision=\"no\",\n accelerate_debug_rdv_file=tmp_file.name,\n- use_cpu=\"yes\",\n+ accelerate_use_cpu=\"yes\",\n ):\n launcher = PrepareForLaunch(function, debug=True)\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 18df61bbc..a8a5e7960 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -66,7 +66,7 @@ def __init__(\n **kwargs,\n ):\n self.__dict__ = self._shared_state\n- if parse_flag_from_env(\"USE_CPU\"):\n+ if parse_flag_from_env(\"ACCELERATE_USE_CPU\"):\n cpu = True\n self._check_initialized(mixed_precision, cpu)\n self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\n@@ -74,10 +74,12 @@ def __init__(\n self.backend = None\n self.deepspeed_plugin = None\n mixed_precision = (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision.lower()\n+ parse_choice_from_env(\"ACCELERATE_MIXED_PRECISION\", \"no\")\n+ if mixed_precision is None\n+ else mixed_precision.lower()\n )\n dynamo_backend = (\n- parse_choice_from_env(\"DYNAMO_BACKEND\", \"no\") if dynamo_backend is None else dynamo_backend\n+ parse_choice_from_env(\"ACCELERATE_DYNAMO_BACKEND\", \"no\") if dynamo_backend is None else dynamo_backend\n )\n self.dynamo_backend = DynamoBackend(dynamo_backend.upper())\n if not _from_accelerator:\n@@ -86,11 +88,11 @@ def __init__(\n \"before using any functionality from the `accelerate` library.\"\n )\n if (\n- os.environ.get(\"USE_SAGEMAKER\", \"false\") == \"true\"\n- and os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") != SageMakerDistributedType.NO\n+ os.environ.get(\"ACCELERATE_USE_SAGEMAKER\", \"false\") == \"true\"\n+ and os.environ.get(\"ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE\") != SageMakerDistributedType.NO\n and not cpu\n ):\n- if os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") == SageMakerDistributedType.DATA_PARALLEL:\n+ if os.environ.get(\"ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE\") == SageMakerDistributedType.DATA_PARALLEL:\n self.distributed_type = DistributedType.MULTI_GPU\n import smdistributed.dataparallel.torch.torch_smddp # noqa\n \n@@ -110,7 +112,7 @@ def __init__(\n self.local_process_index = xm.get_local_ordinal()\n self.device = xm.xla_device()\n if mixed_precision == \"bf16\":\n- if os.environ.get(\"DOWNCAST_BF16\"):\n+ if os.environ.get(\"ACCELERATE_DOWNCAST_BF16\"):\n os.environ[\"XLA_USE_BF16\"] = str(0)\n os.environ[\"XLA_DOWNCAST_BF16\"] = str(1)\n self.downcast_bfloat = True\n@@ -119,7 +121,7 @@ def __init__(\n os.environ[\"XLA_DOWNCAST_BF16\"] = str(0)\n self.downcast_bfloat = False\n self.mixed_precision = mixed_precision\n- elif os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\n+ elif os.environ.get(\"ACCELERATE_USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\n assert (\n is_deepspeed_available()\n ), \"DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source\"\n@@ -153,12 +155,12 @@ def __init__(\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.mixed_precision = 
mixed_precision\n- if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n+ if os.environ.get(\"ACCELERATE_USE_FSDP\", \"false\") == \"true\":\n self.distributed_type = DistributedType.FSDP\n if self.mixed_precision != \"no\":\n fsdp_plugin.set_mixed_precision(self.mixed_precision)\n self.fsdp_plugin = fsdp_plugin\n- if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\":\n+ if os.environ.get(\"ACCELERATE_USE_MEGATRON_LM\", \"false\") == \"true\":\n self.distributed_type = DistributedType.MEGATRON_LM\n megatron_lm_plugin.set_mixed_precision(self.mixed_precision)\n self.megatron_lm_plugin = megatron_lm_plugin\n@@ -207,7 +209,7 @@ def __init__(\n self.distributed_type = DistributedType.NO\n self.num_processes = 1\n self.process_index = self.local_process_index = 0\n- if parse_flag_from_env(\"USE_MPS_DEVICE\") and not cpu:\n+ if parse_flag_from_env(\"ACCELERATE_USE_MPS_DEVICE\") and not cpu:\n if not torch.backends.mps.is_available():\n if not torch.backends.mps.is_built():\n raise AssertionError(\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex bfedee162..4ac250e6e 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -15,6 +15,7 @@\n import importlib\n import os\n import sys\n+import warnings\n from distutils.util import strtobool\n from functools import lru_cache\n \n@@ -93,7 +94,7 @@ def is_bf16_available(ignore_tpu=False):\n \n \n def is_megatron_lm_available():\n- if strtobool(os.environ.get(\"USE_MEGATRON_LM\", \"False\")) == 1:\n+ if strtobool(os.environ.get(\"ACCELERATE_USE_MEGATRON_LM\", \"False\")) == 1:\n package_exists = importlib.util.find_spec(\"megatron\") is not None\n if package_exists:\n megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\n@@ -130,7 +131,15 @@ def is_boto3_available():\n \n \n def is_rich_available():\n- return (importlib.util.find_spec(\"rich\") is not None) and (not parse_flag_from_env(\"DISABLE_RICH\"))\n+ if importlib.util.find_spec(\"rich\") is not None:\n+ if parse_flag_from_env(\"DISABLE_RICH\"):\n+ warnings.warn(\n+ \"The `DISABLE_RICH` flag is deprecated and will be removed in version 0.17.0 of πŸ€— Accelerate. Use `ACCELERATE_DISABLE_RICH` instead.\",\n+ FutureWarning,\n+ )\n+ return not parse_flag_from_env(\"DISABLE_RICH\")\n+ return not parse_flag_from_env(\"ACCELERATE_DISABLE_RICH\")\n+ return False\n \n \n def is_sagemaker_available():\n", "code_comments": [ { "body": "This feels over-engineered, and outside the scope of this PR.", "diff_hunk": "@@ -0,0 +1,68 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import warnings\n+\n+\n+def deprecate_value(old=None, new=None, version=None, warn=True, deprecate_str=None):", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/890", "pr_id": 1137735853 }, { "diff": "diff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\nindex 0fdff58ae..6cfee9dda 100755\n--- a/examples/by_feature/deepspeed_with_config_support.py\n+++ b/examples/by_feature/deepspeed_with_config_support.py\n@@ -642,7 +642,7 @@ def group_texts(examples):\n total_loss += loss.detach().float()\n loss = loss / args.gradient_accumulation_steps\n accelerator.backward(loss)\n- if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n+ if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks! πŸ€—", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/887", "pr_id": 1136396651 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 0d0fe0aa6..0541ce506 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -795,7 +795,7 @@ def prepare(self, *args, device_placement=None):\n - `torch.utils.data.DataLoader`: PyTorch Dataloader\n - `torch.nn.Module`: PyTorch Module\n - `torch.optim.Optimizer`: PyTorch Optimizer\n- - `torch.optim.lr_scheduler._LRScheduler`: PyTorch LR Scheduler\n+ - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler\n \n device_placement (`List[bool]`, *optional*):\n Used to customize whether automatic device placement should be performed for each object passed. 
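The `_LRScheduler` to `LRScheduler` rename referenced in the docstring changes above is a private-to-public move in PyTorch. A small compatibility sketch is shown below; it assumes `torch` is installed, and the repo itself gates on a version check (`is_torch_version`) rather than this try/except form.

```python
import torch

try:
    # Newer PyTorch exposes the public base class.
    from torch.optim.lr_scheduler import LRScheduler
except ImportError:
    # Older releases only have the private name; alias it so isinstance
    # checks and type hints work the same either way.
    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler

optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
assert isinstance(scheduler, LRScheduler)
```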
Needs\n@@ -998,7 +998,7 @@ def _prepare_deepspeed(self, *args):\n model = obj\n elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)):\n optimizer = obj\n- elif (isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\n+ elif (isinstance(obj, (LRScheduler, DummyScheduler))) or (\n type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\n ):\n scheduler = obj\n@@ -1097,7 +1097,7 @@ def _prepare_deepspeed(self, *args):\n result[i] = engine\n elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)):\n result[i] = optimizer\n- elif (isinstance(result[i], (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\n+ elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or (\n type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\n ):\n result[i] = scheduler\n@@ -1150,7 +1150,7 @@ def _prepare_megatron_lm(self, *args):\n model = obj\n elif isinstance(obj, (torch.optim.Optimizer)):\n optimizer = obj\n- elif isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, MegatronLMDummyScheduler)):\n+ elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)):\n scheduler = obj\n \n if model is not None:\n@@ -1260,13 +1260,13 @@ def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=N\n self._optimizers.append(optimizer)\n return optimizer\n \n- def prepare_scheduler(self, scheduler: torch.optim.lr_scheduler._LRScheduler):\n+ def prepare_scheduler(self, scheduler: LRScheduler):\n \"\"\"\n Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use\n [`Accelerator.prepare`] instead.\n \n Args:\n- scheduler (`torch.optim.lr_scheduler._LRScheduler`):\n+ scheduler (`torch.optim.lr_scheduler.LRScheduler`):\n A vanilla PyTorch scheduler to prepare\n \"\"\"\n # We try to find the optimizer associated with `scheduler`, the default is the full list.\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/884", "pr_id": 1134441117 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex a29794fc4..0d0fe0aa6 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -751,7 +751,7 @@ def _prepare_fsdp(self, *args):\n for obj in args:\n if isinstance(obj, torch.optim.Optimizer):\n if len(obj.param_groups) > 1:\n- logger.warn(\n+ logger.warning(\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\n \"a single one due to nested module wrapping and parameter flattening.\"\n )\n@@ -831,7 +831,7 @@ def prepare(self, *args, device_placement=None):\n \"Then pass the optimizers to the prepare call in the same order as corresponding models.\"\n )\n elif model_count == 1 and optimizer_present:\n- logger.warn(\n+ logger.warning(\n \"FSDP Warning: When using FSDP, \"\n \"it is efficient and recommended to call prepare for the model before creating the optimizer\"\n )\n@@ -1713,7 +1713,7 @@ def load_state(self, input_dir: str):\n err = \"Warning! 
Number of found checkpoints does not match the number of registered objects:\"\n err += f\"\\n\\tFound checkpoints: {len(custom_checkpoints)}\"\n err += f\"\\n\\tRegistered objects: {len(self._custom_objects)}\\nSkipping.\"\n- logger.warn(err)\n+ logger.warning(err)\n else:\n logger.info(f\"Loading in {len(custom_checkpoints)} custom states\")\n for index, obj in enumerate(self._custom_objects):\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex b2d95bad8..9e37bf0db 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -1085,7 +1085,7 @@ def launch_command(args):\n message += (\n \"\\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.\"\n )\n- logger.warn(message)\n+ logger.warning(message)\n \n # Use the proper launcher\n if args.use_deepspeed and not args.cpu:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/881", "pr_id": 1132334450 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex d6a323559..be366c6e3 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -95,6 +95,12 @@\n if is_tpu_available(check_device=False):\n import torch_xla.distributed.xla_multiprocessing as xmp\n \n+\n+if is_torch_version(\"<=\", \"1.13.5\"):\n+ from torch.optim.lr_scheduler import _LRScheduler as LRScheduler\n+else:\n+ from torch.optim.lr_scheduler import LRScheduler as LRScheduler\n+\n logger = get_logger(__name__)\n \n \n@@ -725,7 +731,7 @@ def _prepare_one(self, obj, first_pass=False, device_placement=None):\n optimizer = self.prepare_optimizer(obj, device_placement=device_placement)\n return optimizer\n # Second pass of preparation: LR scheduler (which need the full list of optimizers)\n- elif isinstance(obj, torch.optim.lr_scheduler._LRScheduler):\n+ elif isinstance(obj, LRScheduler):\n scheduler = self.prepare_scheduler(obj)\n return scheduler\n # Return the unprocessed object if previous criteria was not met\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/878", "pr_id": 1130077770 }, { "diff": "diff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex bb14078d4..442b63cf5 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -31,6 +31,7 @@\n import torch\n \n from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE, MODEL_NAME, OPTIMIZER_NAME\n+from .versions import is_torch_version\n \n \n class KwargsHandler:\n@@ -526,9 +527,9 @@ def __post_init__(self):\n from torch.distributed.fsdp.fully_sharded_data_parallel import (\n BackwardPrefetch,\n CPUOffload,\n+ FullStateDictConfig,\n ShardingStrategy,\n StateDictType,\n- _state_dict_type_to_config,\n )\n \n if self.sharding_strategy is None:\n@@ -549,12 +550,8 @@ def __post_init__(self):\n state_dict_type_policy = os.environ.get(\"FSDP_STATE_DICT_TYPE\", \"FULL_STATE_DICT\")\n self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)\n \n- if self.state_dict_type == StateDictType.FULL_STATE_DICT:\n- self.state_dict_config = 
_state_dict_type_to_config[self.state_dict_type](\n- offload_to_cpu=True, rank0_only=True\n- )\n- else:\n- self.state_dict_config = _state_dict_type_to_config[self.state_dict_type]()\n+ if self.state_dict_type == StateDictType.FULL_STATE_DICT and self.state_dict_config is None:\n+ self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)\n \n @staticmethod\n def get_module_class_from_name(module, name):\n@@ -616,9 +613,14 @@ def save_model(self, accelerator, model, output_dir, model_index=0):\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\n \n- if self.state_dict_type == StateDictType.FULL_STATE_DICT:\n+ if is_torch_version(\"<=\", \"1.13.5\"):\n with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\n state_dict = model.state_dict()\n+ else:\n+ FSDP.set_state_dict_type(model, self.state_dict_type, self.state_dict_config)\n+ state_dict = model.state_dict()\n+\n+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:\n weights_name = f\"{MODEL_NAME}.bin\" if model_index == 0 else f\"{MODEL_NAME}_{model_index}.bin\"\n output_model_file = os.path.join(output_dir, weights_name)\n if accelerator.process_index == 0:\n@@ -626,8 +628,6 @@ def save_model(self, accelerator, model, output_dir, model_index=0):\n torch.save(state_dict, output_model_file)\n print(f\"Model saved to {output_model_file}\")\n else:\n- with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\n- state_dict = model.state_dict()\n weights_name = (\n f\"{MODEL_NAME}_rank{accelerator.process_index}.bin\"\n if model_index == 0\n@@ -660,7 +660,12 @@ def load_model(self, accelerator, model, input_dir, model_index=0):\n print(f\"Loading model from {input_model_file}\")\n state_dict = torch.load(input_model_file)\n print(f\"Model loaded from {input_model_file}\")\n- with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\n+\n+ if is_torch_version(\"<=\", \"1.13.5\"):\n+ with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\n+ model.load_state_dict(state_dict)\n+ else:\n+ FSDP.set_state_dict_type(model, self.state_dict_type, self.state_dict_config)\n model.load_state_dict(state_dict)\n \n def save_optimizer(self, accelerator, optimizer, model, output_dir, optimizer_index=0, optim_input=None):\ndiff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py\nindex 249d2b692..e90106c17 100644\n--- a/tests/fsdp/test_fsdp.py\n+++ b/tests/fsdp/test_fsdp.py\n@@ -95,7 +95,7 @@ def test_backward_prefetch(self):\n self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))\n \n def test_state_dict_type(self):\n- from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType, _state_dict_type_to_config\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\n \n for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n env = self.dist_env.copy()\n@@ -103,9 +103,6 @@ def test_state_dict_type(self):\n with mockenv_context(**env):\n fsdp_plugin = FullyShardedDataParallelPlugin()\n self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))\n- self.assertEqual(\n- type(fsdp_plugin.state_dict_config), type(_state_dict_type_to_config[StateDictType(i + 1)]())\n- )\n if state_dict_type == \"FULL_STATE_DICT\":\n self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)\n self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)\n", 
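Several hunks above branch on `is_torch_version("<=", "1.13.5")`, imported from accelerate's `utils.versions`. A rough sketch of how such a comparison can be built on `packaging` is below; the helper name and implementation are illustrative rather than the repo's actual code, and `packaging` is assumed to be installed.

```python
import operator

import torch
from packaging.version import parse

_OPS = {"<": operator.lt, "<=": operator.le, "==": operator.eq, ">=": operator.ge, ">": operator.gt}


def compare_torch_version(op: str, version: str) -> bool:
    """Compare the installed torch version against `version` with operator `op`."""
    return _OPS[op](parse(torch.__version__), parse(version))


# e.g. pick between the FSDP context manager and set_state_dict_type APIs.
print(compare_torch_version("<=", "1.13.5"))
```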
"code_comments": [ { "body": "```suggestion\r\n if is_torch_version(\"<=\", \"1.13.5\"):\r\n```\r\nJust in case they make a patch release before 1.14 is out.", "diff_hunk": "@@ -616,18 +613,21 @@ def save_model(self, accelerator, model, output_dir, model_index=0):\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\n \n- if self.state_dict_type == StateDictType.FULL_STATE_DICT:\n+ if is_torch_version(\"<=\", \"1.13.0\"):", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/877", "pr_id": 1129923134 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex e277e89a2..24857ad25 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -210,7 +210,8 @@ def dispatch_model(\n device_map: Dict[str, Union[str, int, torch.device]],\n main_device: Optional[torch.device] = None,\n state_dict: Optional[Dict[str, torch.Tensor]] = None,\n- offload_dir: Union[str, os.PathLike] = None,\n+ offload_dir: Optional[Union[str, os.PathLike]] = None,\n+ offload_index: Optional[Dict[str, str]] = None,\n offload_buffers: bool = False,\n preload_module_classes: Optional[List[str]] = None,\n ):\n@@ -231,6 +232,9 @@ def dispatch_model(\n The state dict of the part of the model that will be kept on CPU.\n offload_dir (`str` or `os.PathLike`):\n The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_index (`Dict`, *optional*):\n+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). 
Will default\n+ to the index saved in `save_folder`.\n offload_buffers (`bool`, *optional*, defaults to `False`):\n Whether or not to offload the buffers with the model parameters.\n preload_module_classes (`List[str]`, *optional*):\n@@ -256,13 +260,15 @@ def dispatch_model(\n state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\n \n disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\n- if offload_dir is None and len(disk_modules) > 0:\n+ if offload_dir is None and offload_index is None and len(disk_modules) > 0:\n raise ValueError(\n \"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules \"\n f\"need to be offloaded: {', '.join(disk_modules)}.\"\n )\n- if len(disk_modules) > 0 and (\n- not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\"))\n+ if (\n+ len(disk_modules) > 0\n+ and offload_index is None\n+ and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")))\n ):\n disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)\n offload_state_dict(offload_dir, disk_state_dict)\n@@ -273,8 +279,11 @@ def dispatch_model(\n offloaded_devices = [\"disk\"] if main_device == \"cpu\" else [\"cpu\", \"disk\"]\n offload = {name: device in offloaded_devices for name, device in device_map.items()}\n save_folder = offload_dir if len(disk_modules) > 0 else None\n- if state_dict is not None or save_folder is not None:\n- weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=save_folder)\n+ if state_dict is not None or save_folder is not None or offload_index is not None:\n+ device = main_device if offload_index is not None else None\n+ weights_map = OffloadedWeightsLoader(\n+ state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device\n+ )\n else:\n weights_map = None\n \ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 87f891021..f46bce267 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -34,6 +34,7 @@\n is_megatron_lm_available,\n is_mlflow_available,\n is_rich_available,\n+ is_safetensors_available,\n is_sagemaker_available,\n is_tensorboard_available,\n is_tpu_available,\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex bfedee162..72d6202cd 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -101,6 +101,10 @@ def is_megatron_lm_available():\n return False\n \n \n+def is_safetensors_available():\n+ return importlib.util.find_spec(\"safetensors\") is not None\n+\n+\n def is_transformers_available():\n return importlib.util.find_spec(\"transformers\") is not None\n \ndiff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nindex bc0fcdef1..22f51cce1 100644\n--- a/src/accelerate/utils/offload.py\n+++ b/src/accelerate/utils/offload.py\n@@ -20,6 +20,12 @@\n import numpy as np\n import torch\n \n+from ..logging import get_logger\n+from .imports import is_safetensors_available\n+\n+\n+logger = get_logger(__name__)\n+\n \n def offload_weight(weight, weight_name, offload_folder, index=None):\n dtype = None\n@@ -131,8 +137,8 @@ class OffloadedWeightsLoader(Mapping):\n save_folder (`str` or `os.PathLike`, *optional*):\n The directory in which the weights are stored (by `offload_state_dict` for instance).\n index (`Dict`, *optional*):\n- A dictionary from weight name to their information 
(`dtype` and `shape`). Will default to the index saved\n- in `save_folder`.\n+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default\n+ to the index saved in `save_folder`.\n \"\"\"\n \n def __init__(\n@@ -140,6 +146,7 @@ def __init__(\n state_dict: Dict[str, torch.Tensor] = None,\n save_folder: Optional[Union[str, os.PathLike]] = None,\n index: Mapping = None,\n+ device=None,\n ):\n if state_dict is None and save_folder is None:\n raise ValueError(\"Need either a `state_dict` or a `save_folder` containing offloaded weights.\")\n@@ -152,12 +159,32 @@ def __init__(\n self.index = {} if index is None else index\n self.all_keys = list(self.state_dict.keys())\n self.all_keys.extend([key for key in self.index if key not in self.all_keys])\n+ self.device = device\n \n def __getitem__(self, key: str):\n # State dict gets priority\n if key in self.state_dict:\n return self.state_dict[key]\n weight_info = self.index[key]\n+ if weight_info.get(\"safetensors_file\") is not None:\n+ if not is_safetensors_available():\n+ raise ImportError(\"These offloaded weights require the use of safetensors: `pip install safetensors`.\")\n+\n+ if \"SAFETENSORS_FAST_GPU\" not in os.environ:\n+ logger.info(\"Enabling fast loading with safetensors by setting `SAFETENSORS_FAST_GPU` to 1.\")\n+ os.environ[\"SAFETENSORS_FAST_GPU\"] = \"1\"\n+\n+ from safetensors import safe_open\n+\n+ device = \"cpu\" if self.device is None else self.device\n+ with safe_open(weight_info[\"safetensors_file\"], framework=\"pt\", device=device) as f:\n+ tensor = f.get_tensor(weight_info.get(\"weight_name\", key))\n+\n+ if \"dtype\" in weight_info:\n+ return tensor.to(getattr(torch, weight_info[\"dtype\"]))\n+ else:\n+ return tensor\n+\n weight_file = os.path.join(self.save_folder, f\"{key}.dat\")\n return load_offloaded_weight(weight_file, weight_info)\n \n", "code_comments": [ { "body": "We need a guard here for `is_safetensors_available` and raise an error if not :) (aka make one :) )", "diff_hunk": "@@ -152,12 +153,25 @@ def __init__(\n self.index = {} if index is None else index\n self.all_keys = list(self.state_dict.keys())\n self.all_keys.extend([key for key in self.index if key not in self.all_keys])\n+ self.device = device\n \n def __getitem__(self, key: str):\n # State dict gets priority\n if key in self.state_dict:\n return self.state_dict[key]\n weight_info = self.index[key]\n+ if weight_info.get(\"safetensors_file\") is not None:\n+ from safetensors import safe_open", "from_author": false }, { "body": "Indeed, will clean that up on Monday!", "diff_hunk": "@@ -152,12 +153,25 @@ def __init__(\n self.index = {} if index is None else index\n self.all_keys = list(self.state_dict.keys())\n self.all_keys.extend([key for key in self.index if key not in self.all_keys])\n+ self.device = device\n \n def __getitem__(self, key: str):\n # State dict gets priority\n if key in self.state_dict:\n return self.state_dict[key]\n weight_info = self.index[key]\n+ if weight_info.get(\"safetensors_file\") is not None:\n+ from safetensors import safe_open", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/873", "pr_id": 1128165008 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex d6a323559..d614b47e2 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ 
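The `OffloadedWeightsLoader` change in the entry above looks tensors up lazily through `safetensors.safe_open`. A minimal end-to-end sketch of that API follows; the file name and tensor key are made up for the example, and `safetensors` is assumed to be installed.

```python
import torch
from safetensors import safe_open
from safetensors.torch import save_file

# Write a tiny "offloaded" checkpoint (names here are illustrative).
save_file({"linear.weight": torch.randn(4, 4)}, "offloaded.safetensors")

# Read back a single tensor lazily, directly onto the requested device,
# without materialising the whole file in memory -- the same call pattern
# the diff uses inside __getitem__.
with safe_open("offloaded.safetensors", framework="pt", device="cpu") as f:
    tensor = f.get_tensor("linear.weight")

print(tensor.shape)  # torch.Size([4, 4])
```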
-926,6 +926,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n+ model._original_forward = model.forward\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex 7ba4482cc..3ebea2eef 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -17,6 +17,7 @@\n \"\"\"\n \n \n+import pickle\n from functools import update_wrapper\n from typing import Any, Mapping\n \n@@ -473,8 +474,6 @@ class ConvertOutputsToFp32:\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\n precision will be convert back to FP32.\n \n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\n-\n Args:\n model_forward (`Callable`):\n The function which outputs we want to treat.\n@@ -490,6 +489,11 @@ def __init__(self, model_forward):\n def __call__(self, *args, **kwargs):\n return convert_to_fp32(self.model_forward(*args, **kwargs))\n \n+ def __getstate__(self):\n+ raise pickle.PicklingError(\n+ \"Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it.\"\n+ )\n+\n \n convert_outputs_to_fp32 = ConvertOutputsToFp32\n \ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\nindex a4a7b6312..1eb7cca88 100644\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -21,7 +21,6 @@\n from ..state import AcceleratorState\n from .dataclasses import DistributedType\n from .imports import is_deepspeed_available, is_tpu_available\n-from .operations import ConvertOutputsToFp32\n \n \n if is_deepspeed_available():\n@@ -53,8 +52,13 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False):\n \n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n- if isinstance(forward, ConvertOutputsToFp32):\n- setattr(model, \"forward\", forward.model_forward)\n+ original_forward = model.__dict__.pop(\"_original_forward\", None)\n+ if original_forward is not None:\n+ while hasattr(forward, \"__wrapped__\"):\n+ forward = forward.__wrapped__\n+ if forward == original_forward:\n+ break\n+ model.forward = forward\n return model\n \n \ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nindex 1e9d18c17..7c7629a1a 100644\n--- a/tests/test_utils.py\n+++ b/tests/test_utils.py\n@@ -19,8 +19,15 @@\n \n import torch\n \n+from accelerate.test_utils.testing import require_cuda\n from accelerate.test_utils.training import RegressionModel\n-from accelerate.utils import convert_outputs_to_fp32, find_device, patch_environment, send_to_device\n+from accelerate.utils import (\n+ convert_outputs_to_fp32,\n+ extract_model_from_parallel,\n+ find_device,\n+ patch_environment,\n+ send_to_device,\n+)\n \n \n ExampleNamedTuple = namedtuple(\"ExampleNamedTuple\", \"a b c\")\n@@ -74,9 +81,20 @@ def test_patch_environment(self):\n self.assertNotIn(\"AA\", os.environ)\n self.assertNotIn(\"BB\", os.environ)\n \n- def test_convert_to_32_lets_model_pickle(self):\n+ def 
test_can_undo_convert_outputs(self):\n model = RegressionModel()\n+ model._original_forward = model.forward\n model.forward = convert_outputs_to_fp32(model.forward)\n+ model = extract_model_from_parallel(model)\n+ _ = pickle.dumps(model)\n+\n+ @require_cuda\n+ def test_can_undo_fp16_conversion(self):\n+ model = RegressionModel()\n+ model._original_forward = model.forward\n+ model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n+ model.forward = convert_outputs_to_fp32(model.forward)\n+ model = extract_model_from_parallel(model)\n _ = pickle.dumps(model)\n \n def test_find_device(self):\n", "code_comments": [ { "body": "@sgugger I know how you feel about `_` but I felt this was a decent time for one because the user really *shouldn't* need to know this exists at all or try to accidentally call it in any way", "diff_hunk": "@@ -926,6 +926,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n+ model._original_forward = model.forward", "from_author": true }, { "body": "Agree in this case :-)", "diff_hunk": "@@ -926,6 +926,7 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n+ model._original_forward = model.forward", "from_author": false }, { "body": "Are we sure this does not make a regression with this issue?", "diff_hunk": "@@ -468,13 +468,11 @@ def _is_fp16_bf16_tensor(tensor):\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\n \n \n-class ConvertOutputsToFp32:\n+def convert_outputs_to_fp32(model_forward):\n \"\"\"\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\n precision will be convert back to FP32.\n \n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\n-", "from_author": false }, { "body": "The issue will always exist because it can never be pickled if we've wrapped it in a mixed precision decorator. So it will always have to go through `unwrap_model`. And this only ever comes up if we do mixed precision. See the nb shown in the bug report here: https://colab.research.google.com/drive/11fvrk1Jslw2VIRTF6h0pdGgJx5AkAIMv?usp=sharing\r\n\r\nWhere specifically our explicit wrapping around `torch.cuda.amp` causes issues, and technically was the original issue too.\r\n\r\nOr, phrasing it another way, the original issue doesn't come from Accelerate actually it comes from torch and using the autocast decorator. To keep things consistent this just makes them all use decorators so that we can quickly just unwrap all of them. 
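The unwrapping loop added to `extract_model_from_parallel` above relies on `functools`-based decorators recording the callable they wrap on `__wrapped__`. A standalone sketch of that chain walk, with a toy decorator and function standing in for the autocast/fp32 wrappers:

```python
import functools


def forward(x):
    return x + 1


def wrap(fn):
    # functools.wraps()/update_wrapper() store the wrapped callable on
    # `__wrapped__`, one link per layer of decoration.
    @functools.wraps(fn)
    def inner(*args, **kwargs):
        return fn(*args, **kwargs)

    return inner


wrapped = wrap(wrap(forward))  # two layers, like autocast + fp32 conversion

# Follow the chain back to the original callable, as the diff does before
# restoring `model.forward`.
unwrapped = wrapped
while hasattr(unwrapped, "__wrapped__"):
    unwrapped = unwrapped.__wrapped__

assert unwrapped is forward
```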
", "diff_hunk": "@@ -468,13 +468,11 @@ def _is_fp16_bf16_tensor(tensor):\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\n \n \n-class ConvertOutputsToFp32:\n+def convert_outputs_to_fp32(model_forward):\n \"\"\"\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\n precision will be convert back to FP32.\n \n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\n-", "from_author": true }, { "body": "I can try to see if it's possible to inject a warning before trying to pickle/save it and tell the user they need to unwrap the model first. ", "diff_hunk": "@@ -468,13 +468,11 @@ def _is_fp16_bf16_tensor(tensor):\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\n \n \n-class ConvertOutputsToFp32:\n+def convert_outputs_to_fp32(model_forward):\n \"\"\"\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\n precision will be convert back to FP32.\n \n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\n-", "from_author": true }, { "body": "Let's put your comment on the PR here as well, to explain what this code is doing πŸ™ ", "diff_hunk": "@@ -53,8 +52,13 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False):\n \n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n- if isinstance(forward, ConvertOutputsToFp32):\n- setattr(model, \"forward\", forward.model_forward)\n+ original_forward = model.__dict__.pop(\"_original_forward\", None)\n+ if original_forward is not None:\n+ while hasattr(forward, \"__wrapped__\"):\n+ forward = forward.__wrapped__\n+ if forward == original_forward:\n+ break", "from_author": false }, { "body": "A custom `__getstate__` func let's us raise an error when the user tries to prepare a model that had automatic mixed precision performed, ensuring with a clear error what the proper behavior needs to be, and the user doesn't have to know that this is due to PyTorch *and* accelerate and their behaviors", "diff_hunk": "@@ -490,6 +488,11 @@ def __init__(self, model_forward):\n def __call__(self, *args, **kwargs):\n return convert_to_fp32(self.model_forward(*args, **kwargs))\n \n+ def __getstate__(self):\n+ raise pickle.PicklingError(\n+ \"Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it.\"\n+ )\n+", "from_author": true }, { "body": "When a function is wrapped using functools (such that we have `torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)` the wrapped function (the original `model.forward`) exists in the `__wrapped__` attribute. 
When using `functools.update_wrapper` (such as in `ConvertOutputsToFp32`) this will also in turn add a new `__wrapped__` function, allowing us to follow the chain of `__wrapped__` until we return to the original model forward function, which was stored away earlier.", "diff_hunk": "@@ -53,8 +52,13 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False):\n \n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n- if isinstance(forward, ConvertOutputsToFp32):\n- setattr(model, \"forward\", forward.model_forward)\n+ original_forward = model.__dict__.pop(\"_original_forward\", None)\n+ if original_forward is not None:\n+ while hasattr(forward, \"__wrapped__\"):\n+ forward = forward.__wrapped__\n+ if forward == original_forward:\n+ break", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Some notes on the final version (which will also be trickled into the code of the PR):\r\n\r\n- I kept this as a class so that we can implement a custom `__getstate__` which will return an explicit pickling error if someone tries to pickle a wrapped model with automatic mixed precision on it\r\n- When a function is wrapped using functools (such that we have `torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)` the wrapped function (the original `model.forward`) exists in the `__wrapped__` attribute. When using `functools.update_wrapper` (such as in `ConvertOutputsToFp32`) this will also in turn add a new `__wrapped__` function, allowing us to follow the chain of `__wrapped__` until we return to the original model forward function, which was stored away earlier. ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/872", "pr_id": 1128136442 }, { "diff": "diff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\nindex 2eceb223e..679f2da5c 100644\n--- a/src/accelerate/logging.py\n+++ b/src/accelerate/logging.py\n@@ -78,7 +78,8 @@ def get_logger(name: str, log_level: str = None):\n ```\n \"\"\"\n if log_level is None:\n- log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", \"WARNING\")\n+ log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", None)\n logger = logging.getLogger(name)\n- logging.basicConfig(level=log_level.upper())\n+ if log_level is not None:\n+ logger.setLevel(log_level.upper())\n return MultiProcessAdapter(logger, {})\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/871", "pr_id": 1127844339 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 6ac8d8d3a..d6a323559 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -953,7 +953,9 @@ def _prepare_deepspeed(self, *args):\n batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\n if len(batch_sizes) == 0:\n raise ValueError(\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ \"When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders \"\n+ \"or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file\"\n+ \"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`.\"\n 
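As a usage sketch of the workaround this error message points at: the micro batch size can be set by hand when no dataloader is passed to `prepare`. This only does something inside a DeepSpeed-enabled run launched through `accelerate launch`, and the value 4 is purely illustrative.

```python
from accelerate import Accelerator
from accelerate.state import AcceleratorState

accelerator = Accelerator()
state = AcceleratorState()

# Only meaningful when the run was configured for DeepSpeed; otherwise the
# plugin is simply not set.
if state.deepspeed_plugin is not None:
    state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 4

# model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
```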
)\n \n batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex 1be794421..86c9766a1 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -350,7 +350,9 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n with self.assertRaises(ValueError) as cm:\n model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)\n self.assertTrue(\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ \"When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders \"\n+ \"or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file\"\n+ \"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`.\"\n in str(cm.exception)\n )\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/868", "pr_id": 1126820335 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex a653b9d6a..a3dcd2dcb 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -63,21 +63,21 @@ def get_cluster_input():\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\n num_machines = _ask_field(\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n- lambda x: int(x),\n+ int,\n default=1,\n )\n if num_machines > 1:\n machine_rank = _ask_options(\n \"What is the rank of this machine?\",\n list(range(num_machines)),\n- lambda x: int(x),\n+ int,\n )\n main_process_ip = _ask_field(\n \"What is the IP address of the machine that will host the main process? \",\n )\n main_process_port = _ask_field(\n \"What is the port you will use to communicate with the main process? \",\n- lambda x: int(x),\n+ int,\n )\n same_network = _ask_field(\n \"Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: \",\n@@ -153,14 +153,14 @@ def get_cluster_input():\n if use_deepspeed_config:\n deepspeed_config[\"deepspeed_config_file\"] = _ask_field(\n \"Please enter the path to the json DeepSpeed config file: \",\n- lambda x: str(x),\n+ str,\n default=\"none\",\n )\n else:\n deepspeed_config[\"zero_stage\"] = _ask_options(\n \"What should be your DeepSpeed's ZeRO optimization stage?\",\n [0, 1, 2, 3],\n- lambda x: int(x),\n+ int,\n default=2,\n )\n \n@@ -174,7 +174,7 @@ def get_cluster_input():\n )\n deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\n \"How many gradient accumulation steps you're passing in your script? [1]: \",\n- lambda x: int(x),\n+ int,\n default=1,\n )\n use_gradient_clipping = _ask_field(\n@@ -186,7 +186,7 @@ def get_cluster_input():\n if use_gradient_clipping:\n deepspeed_config[\"gradient_clipping\"] = _ask_field(\n \"What is the gradient clipping value? 
[1.0]: \",\n- lambda x: float(x),\n+ float,\n default=1.0,\n )\n if deepspeed_config[\"zero_stage\"] == 3:\n@@ -224,7 +224,7 @@ def get_cluster_input():\n \"for more information please refer official [documentation]\"\n \"(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). \"\n \"Please specify the location of hostfile: \",\n- lambda x: str(x),\n+ str,\n )\n \n is_exclusion_filter = _ask_field(\n@@ -236,7 +236,7 @@ def get_cluster_input():\n if is_exclusion_filter:\n deepspeed_config[\"deepspeed_exclusion_filter\"] = _ask_field(\n \"DeepSpeed exclusion filter string: \",\n- lambda x: str(x),\n+ str,\n )\n \n is_inclusion_filter = _ask_field(\n@@ -248,7 +248,7 @@ def get_cluster_input():\n if is_inclusion_filter:\n deepspeed_config[\"deepspeed_inclusion_filter\"] = _ask_field(\n \"DeepSpeed inclusion filter string: \",\n- lambda x: str(x),\n+ str,\n )\n \n fsdp_config = {}\n@@ -284,12 +284,12 @@ def get_cluster_input():\n if fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[0]:\n fsdp_config[\"fsdp_transformer_layer_cls_to_wrap\"] = _ask_field(\n \"What is the transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...? : \",\n- lambda x: str(x),\n+ str,\n )\n elif fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[1]:\n fsdp_config[\"fsdp_min_num_params\"] = _ask_field(\n \"What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: \",\n- lambda x: int(x),\n+ int,\n default=1e8,\n )\n fsdp_backward_prefetch_query = \"What should be your FSDP's backward prefetch policy?\"\n@@ -319,7 +319,7 @@ def get_cluster_input():\n prefix = \"megatron_lm_\"\n megatron_lm_config[prefix + \"tp_degree\"] = _ask_field(\n \"What is the Tensor Parallelism degree/size? [1]:\",\n- lambda x: int(x),\n+ int,\n default=1,\n error_message=\"Please enter an integer.\",\n )\n@@ -333,14 +333,14 @@ def get_cluster_input():\n \n megatron_lm_config[prefix + \"pp_degree\"] = _ask_field(\n \"What is the Pipeline Parallelism degree/size? [1]:\",\n- lambda x: int(x),\n+ int,\n default=1,\n error_message=\"Please enter an integer.\",\n )\n if megatron_lm_config[prefix + \"pp_degree\"] > 1:\n megatron_lm_config[prefix + \"num_micro_batches\"] = _ask_field(\n \"What is the number of micro-batches? [1]:\",\n- lambda x: int(x),\n+ int,\n default=1,\n error_message=\"Please enter an integer.\",\n )\n@@ -362,7 +362,7 @@ def get_cluster_input():\n \n megatron_lm_config[prefix + \"gradient_clipping\"] = _ask_field(\n \"What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: \",\n- lambda x: float(x),\n+ float,\n default=1.0,\n )\n \n@@ -438,14 +438,14 @@ def get_cluster_input():\n machine_type += \"(s)\"\n num_processes = _ask_field(\n f\"How many {machine_type} should be used for distributed training? [1]:\",\n- lambda x: int(x),\n+ int,\n default=1,\n error_message=\"Please enter an integer.\",\n )\n elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:\n num_processes = _ask_field(\n \"How many GPU(s) should be used for distributed training? 
[1]:\",\n- lambda x: int(x),\n+ int,\n default=1,\n error_message=\"Please enter an integer.\",\n )\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex 22264b41f..af4195f29 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -97,7 +97,7 @@ def get_sagemaker_input():\n credentials_configuration = _ask_options(\n \"How do you want to authorize?\",\n [\"AWS Profile\", \"Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) \"],\n- lambda x: int(x),\n+ int,\n )\n aws_profile = None\n if credentials_configuration == 0:\n@@ -120,7 +120,7 @@ def get_sagemaker_input():\n role_management = _ask_options(\n \"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?\",\n [\"Provide IAM Role name\", \"Create new IAM role using credentials\"],\n- lambda x: int(x),\n+ int,\n )\n if role_management == 0:\n iam_role_name = _ask_field(\"Enter your IAM role name: \")\n@@ -206,13 +206,10 @@ def get_sagemaker_input():\n ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default=\"ml.p3.2xlarge\")\n \n num_machines = 1\n- if (\n- distributed_type == SageMakerDistributedType.DATA_PARALLEL\n- or distributed_type == SageMakerDistributedType.MODEL_PARALLEL\n- ):\n+ if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):\n num_machines = _ask_field(\n \"How many machines do you want use? [1]: \",\n- lambda x: int(x),\n+ int,\n default=1,\n )\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 8eefb9d7f..b2d95bad8 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -863,7 +863,7 @@ def _infer_type(s):\n parser.add_argument(argument, action=action)\n \n return {\n- key: (literal_eval(value) if value == \"True\" or value == \"False\" else value)\n+ key: (literal_eval(value) if value in (\"True\", \"False\") else value)\n for key, value in parser.parse_args(nargs).__dict__.items()\n }\n \ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 4f3930e1c..8a45856cd 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import math\n+from contextlib import suppress\n from typing import List, Optional, Union\n \n import torch\n@@ -364,12 +365,10 @@ def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.synchronized_generator)\n self.gradient_state._set_end_of_dataloader(False)\n- try:\n+ # We can safely pass because the default is -1\n+ with suppress(Exception):\n length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n self.gradient_state._set_remainder(length % self.total_batch_size)\n- except Exception:\n- # We can safely pass because the default is -1\n- pass\n dataloader_iter = super().__iter__()\n # We iterate one batch ahead to check when we are at the end\n try:\n@@ -448,12 +447,10 @@ def __init__(self, dataset, split_batches: bool = False, _drop_last: bool = Fals\n self.gradient_state = GradientState()\n self.state = AcceleratorState()\n self._drop_last = _drop_last\n- try:\n+ # We can safely pass because the default is -1\n+ with suppress(Exception):\n length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n self.gradient_state._set_remainder(length % self.total_batch_size)\n- except Exception:\n- # We can safely pass 
because the default is -1\n- pass\n \n def _fetch_batches(self, iterator):\n batches, batch = None, None\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 689ee5033..75694ba72 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -593,7 +593,7 @@ def filter_trackers(\n if getattr(tracker_init, \"requires_logging_directory\"):\n if logging_dir is None:\n raise ValueError(\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"\n )\n loggers.append(log_type)\n else:\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex bb14078d4..50013c4d5 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -228,7 +228,7 @@ def __str__(self):\n @classmethod\n def list(cls):\n \"Method to list all the possible items in `cls`\"\n- return list(map(lambda item: str(item), cls))\n+ return list(map(str, cls))\n \n \n class LoggerType(BaseEnum):\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nindex c65df08dc..1a4dcb4eb 100644\n--- a/src/accelerate/utils/launch.py\n+++ b/src/accelerate/utils/launch.py\n@@ -88,7 +88,7 @@ def __call__(self, index, *args):\n store=torch.distributed.FileStore(rdv_file, world_size),\n world_size=world_size,\n )\n- elif self.distributed_type == DistributedType.MULTI_GPU or self.distributed_type == DistributedType.MULTI_CPU:\n+ elif self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):\n # Prepare the environment for torch.distributed\n os.environ[\"LOCAL_RANK\"] = str(index)\n os.environ[\"RANK\"] = str(index)\ndiff --git a/src/accelerate/utils/megatron_lm.py b/src/accelerate/utils/megatron_lm.py\nindex afee2fcca..ab3bd8c37 100644\n--- a/src/accelerate/utils/megatron_lm.py\n+++ b/src/accelerate/utils/megatron_lm.py\n@@ -132,7 +132,7 @@ def prepare_model(accelerator):\n custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function\n model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func)\n else:\n- if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\n+ if args.model_type_name in (\"bert\", \"gpt\"):\n model_type = ModelType.encoder_or_decoder\n elif args.model_type_name == \"t5\":\n model_type = ModelType.encoder_and_decoder\n@@ -566,7 +566,7 @@ def loss_func_finetune(labels, logits):\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n- elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n+ elif self.num_labels > 1 and (labels.dtype in (torch.long, torch.int)):\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))\n else:\n", "code_comments": [ { "body": "Disagree with this change here. 
It's more readable to explicitly have the `.keys()`.", "diff_hunk": "@@ -98,7 +98,7 @@ def __call__(self, parser, namespace, values, option_string=None):\n \"optional arguments\",\n ]\n if len(args) > 1:\n- used_platforms = [arg for arg in args if arg in options_to_group.keys()]\n+ used_platforms = [arg for arg in args if arg in options_to_group]", "from_author": false }, { "body": "Not convinced this is more readable.", "diff_hunk": "@@ -245,7 +245,7 @@ def dispatch_model(\n check_device_map(model, device_map)\n \n if main_device is None:\n- if set(device_map.values()) == {\"cpu\"} or set(device_map.values()) == {\"cpu\", \"disk\"}:\n+ if set(device_map.values()) in ({\"cpu\"}, {\"cpu\", \"disk\"}):", "from_author": false }, { "body": "While this is technically equivalent, I prefer the explicit `else`.", "diff_hunk": "@@ -77,8 +77,7 @@ def handle_input(cls):\n if handler:\n cls.current_selection = char\n return handler(cls)\n- else:\n- return None\n+ return None", "from_author": false }, { "body": "I prefer the explicit `else`.", "diff_hunk": "@@ -164,9 +165,8 @@ def __len__(self):\n elif self.even_batches:\n # When we even batches we always get +1\n return length + 1\n- else:\n- # Otherwise it depends on the process index.\n- return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length\n+ # Otherwise it depends on the process index.\n+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -402,8 +400,7 @@ def total_batch_size(self):\n def total_dataset_length(self):\n if hasattr(\"total_length\", self.dataset):\n return self.dataset.total_length\n- else:\n- return len(self.dataset)\n+ return len(self.dataset)", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -548,8 +543,7 @@ def __len__(self):\n return whole_length\n elif self._drop_last:\n return whole_length // self.state.num_processes\n- else:\n- return math.ceil(whole_length / self.state.num_processes)\n+ return math.ceil(whole_length / self.state.num_processes)", "from_author": false }, { "body": "While this is equivalent, `del values[names]` is explicit about what it's doing. `values.pop(name)` is not, so this should really be reverted.", "diff_hunk": "@@ -500,7 +500,7 @@ def store_init_configuration(self, values: dict):\n f'Trainer is attempting to log a value of \"{value}\" for key \"{name}\" as a parameter. 
MLflow\\'s'\n f\" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute.\"\n )\n- del values[name]\n+ values.pop(name)", "from_author": false }, { "body": "This is not equivalent, so this change should be reverted (f-strings use the repr, `str` does not).", "diff_hunk": "@@ -593,7 +593,7 @@ def filter_trackers(\n if getattr(tracker_init, \"requires_logging_directory\"):\n if logging_dir is None:\n raise ValueError(\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"", "from_author": false }, { "body": "Leaving the else and return is clearer.", "diff_hunk": "@@ -538,8 +538,7 @@ def get_batch_transformer(data_iterator):\n \n if megatron_dataset_flag:\n return get_batch_megatron\n- else:\n- return get_batch_transformer\n+ return get_batch_transformer", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -577,8 +576,7 @@ def loss_func_finetune(labels, logits):\n \n if pretraining_flag:\n return loss_func_pretrain\n- else:\n- return loss_func_finetune\n+ return loss_func_finetune", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -666,8 +664,7 @@ def get_batch_transformer(data_iterator):\n \n if megatron_dataset_flag:\n return get_batch_megatron\n- else:\n- return get_batch_transformer\n+ return get_batch_transformer", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -803,8 +800,7 @@ def get_batch_transformer(data_iterator):\n \n if megatron_dataset_flag:\n return get_batch_megatron\n- else:\n- return get_batch_transformer\n+ return get_batch_transformer", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -1118,8 +1114,7 @@ def eval_step(self, **batch_data):\n else:\n loss_reduced[key] = torch.concat(losses_reduced_for_key)\n return loss_reduced\n- else:\n- return {}\n+ return {}", "from_author": false }, { "body": "As above, this should be reverted.", "diff_hunk": "@@ -304,7 +304,7 @@ def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], modul\n values = [v for k, v in device_map.items() if k.startswith(prefix)]\n if len(set(values)) == 1 and len(values) > 1:\n for k in [k for k in device_map if k.startswith(prefix)]:\n- del device_map[k]\n+ device_map.pop(k)", "from_author": false }, { "body": "Let's leave the else return.", "diff_hunk": "@@ -225,8 +225,7 @@ def gather(tensor):\n return _gpu_gather(tensor)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _cpu_gather(tensor)\n- else:\n- return tensor\n+ return tensor", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -297,8 +296,7 @@ def broadcast(tensor, from_process: int = 0):\n return _gpu_broadcast(tensor, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _gpu_broadcast(tensor, src=from_process)\n- else:\n- return tensor\n+ return tensor", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -439,8 +437,7 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\n else:\n if reduction == \"sum\":\n return cloned_tensor.sum()\n- else:\n- return cloned_tensor.mean()\n+ return cloned_tensor.mean()", "from_author": false }, { "body": "I'd leave the `.keys()` as it's more readable this way.", "diff_hunk": "@@ -40,7 +40,7 @@ def compare_versions(library_or_version: Union[str, Version], operation: str, re\n requirement_version (`str`):\n The version to compare the library 
version against\n \"\"\"\n- if operation not in STR_OPERATION_TO_FUNC.keys():\n+ if operation not in STR_OPERATION_TO_FUNC:", "from_author": false }, { "body": "For the record I quickly tested it and it seems that it actually uses `__str__`, not `__rep__`. \r\n\r\n```py\r\nclass A:\r\n def __str__(self):\r\n return \"Use str\"\r\n\r\n def __repr__(self):\r\n return \"Use repr\"\r\n\r\na = A()\r\nprint(a)\r\nprint(\"%s\" % a)\r\nprint(f\"{a}\")\r\nprint(f\"{str(a)}\")\r\nprint(\"{}\".format(a))\r\n```\r\n\r\nOutput (both in python 3.7 and 3.10)\r\n```\r\nUse str\r\nUse str\r\nUse str\r\nUse str\r\nUse str\r\n```", "diff_hunk": "@@ -593,7 +593,7 @@ def filter_trackers(\n if getattr(tracker_init, \"requires_logging_directory\"):\n if logging_dir is None:\n raise ValueError(\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"", "from_author": false }, { "body": "Oh! My bad then.", "diff_hunk": "@@ -593,7 +593,7 @@ def filter_trackers(\n if getattr(tracker_init, \"requires_logging_directory\"):\n if logging_dir is None:\n raise ValueError(\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"", "from_author": false }, { "body": "It is actually a TIL for me, didn't know what to expect :)", "diff_hunk": "@@ -593,7 +593,7 @@ def filter_trackers(\n if getattr(tracker_init, \"requires_logging_directory\"):\n if logging_dir is None:\n raise ValueError(\n- f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\n+ f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/865", "pr_id": 1126156577 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 6ac8d8d3a..d6a323559 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -953,7 +953,9 @@ def _prepare_deepspeed(self, *args):\n batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\n if len(batch_sizes) == 0:\n raise ValueError(\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ \"When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders \"\n+ \"or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file\"\n+ \"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`.\"\n )\n \n batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/861", "pr_id": 1125476819 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 7a9ac92f4..6ac8d8d3a 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -1464,7 +1464,7 @@ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):\n \"\"\"\n return pad_across_processes(tensor, dim=dim, 
pad_index=pad_index, pad_first=pad_first)\n \n- def unwrap_model(self, model):\n+ def unwrap_model(self, model, keep_fp32_wrapper: bool = False):\n \"\"\"\n Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving\n the model.\n@@ -1472,8 +1472,10 @@ def unwrap_model(self, model):\n Args:\n model (`torch.nn.Module`):\n The model to unwrap.\n+ keep_fp32_wrapper (`bool`, *optional*, defaults to `False`):\n+ Whether to not remove the mixed precision hook if it was added.\n \"\"\"\n- return extract_model_from_parallel(model)\n+ return extract_model_from_parallel(model, keep_fp32_wrapper)\n \n def wait_for_everyone(self):\n \"\"\"\n@@ -1760,7 +1762,7 @@ def get_state_dict(self, model, unwrap=True):\n Args:\n model (`torch.nn.Module`):\n A PyTorch model sent through [`Accelerator.prepare`]\n- unwrap (`bool`, *optional*, defaults to True):\n+ unwrap (`bool`, *optional*, defaults to `True`):\n Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict\n \"\"\"\n is_zero_3 = False\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\nindex a9b6f9d4b..a4a7b6312 100644\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -21,6 +21,7 @@\n from ..state import AcceleratorState\n from .dataclasses import DistributedType\n from .imports import is_deepspeed_available, is_tpu_available\n+from .operations import ConvertOutputsToFp32\n \n \n if is_deepspeed_available():\n@@ -30,12 +31,15 @@\n import torch_xla.core.xla_model as xm\n \n \n-def extract_model_from_parallel(model):\n+def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False):\n \"\"\"\n Extract a model from its distributed containers.\n \n Args:\n- model (`torch.nn.Module`): The model to extract.\n+ model (`torch.nn.Module`):\n+ The model to extract.\n+ keep_fp32_wrapper (`bool`, *optional*):\n+ Whether to remove mixed precision hooks from the model.\n \n Returns:\n `torch.nn.Module`: The extracted model.\n@@ -46,6 +50,11 @@ def extract_model_from_parallel(model):\n \n while isinstance(model, options):\n model = model.module\n+\n+ if not keep_fp32_wrapper:\n+ forward = getattr(model, \"forward\")\n+ if isinstance(forward, ConvertOutputsToFp32):\n+ setattr(model, \"forward\", forward.model_forward)\n return model\n \n \n", "code_comments": [ { "body": "```suggestion\r\n the model.\r\n```\r\nAdd this sentence when documenting the argument below instead.", "diff_hunk": "@@ -1464,16 +1464,16 @@ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):\n \"\"\"\n return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)\n \n- def unwrap_model(self, model):\n+ def unwrap_model(self, model, keep_fp32_wrapper: bool = False):\n \"\"\"\n Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving\n- the model.\n+ the model. Will also remove the mixed precision hook if it was added by default.", "from_author": false } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_860). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_860). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_860). All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/860", "pr_id": 1125035113 }, { "diff": "diff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\nindex 46118f7c6..2eceb223e 100644\n--- a/src/accelerate/logging.py\n+++ b/src/accelerate/logging.py\n@@ -78,7 +78,7 @@ def get_logger(name: str, log_level: str = None):\n ```\n \"\"\"\n if log_level is None:\n- log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", \"INFO\")\n+ log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", \"WARNING\")\n logger = logging.getLogger(name)\n logging.basicConfig(level=log_level.upper())\n return MultiProcessAdapter(logger, {})\n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_859). All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/859", "pr_id": 1124948804 }, { "diff": "diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml\nindex 12c600cb7..dc56751c6 100644\n--- a/.github/workflows/build_pr_documentation.yml\n+++ b/.github/workflows/build_pr_documentation.yml\n@@ -9,11 +9,8 @@ concurrency:\n \n jobs:\n build:\n- uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@use_hf_hub\n+ uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main\n with:\n commit_sha: ${{ github.event.pull_request.head.sha }}\n pr_number: ${{ github.event.number }}\n package: accelerate\n- secrets:\n- token: ${{ secrets.HF_DOC_PUSH }}\n- comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}\ndiff --git a/.github/workflows/delete_doc_comment.yml b/.github/workflows/delete_doc_comment.yml\nindex 973c2a8b3..da61d21df 100644\n--- a/.github/workflows/delete_doc_comment.yml\n+++ b/.github/workflows/delete_doc_comment.yml\n@@ -7,10 +7,7 @@ on:\n \n jobs:\n delete:\n- uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@use_hf_hub\n+ uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main\n with:\n pr_number: ${{ github.event.number }}\n package: accelerate\n- secrets:\n- token: ${{ secrets.HF_DOC_PUSH }}\n- comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}\n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_857). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/857", "pr_id": 1124252364 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex eafec816e..3d72aada6 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -46,7 +46,7 @@ Create a default config file for Accelerate with only a few flags set.\n **Usage**: \n \n ```bash\n-accelerate default-config [arguments]\n+accelerate config default [arguments]\n ```\n \n **Optional Arguments**:\n@@ -57,6 +57,28 @@ accelerate default-config [arguments]\n * `-h`, `--help` (`bool`) -- Show a help message and exit\n * `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\n \n+## accelerate config update\n+\n+**Command**:\n+\n+`accelerate config update` or `accelerate-config update`\n+\n+Update an existing config file with the latest defaults while maintaining the old configuration.\n+\n+**Usage**: \n+\n+```bash\n+accelerate config update [arguments]\n+```\n+\n+**Optional Arguments**:\n+* `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content\n+ of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\n+\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\n+\n+\n ## accelerate env\n \n **Command**:\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\nindex ba9d6cf30..9b1545982 100644\n--- a/src/accelerate/commands/config/__init__.py\n+++ b/src/accelerate/commands/config/__init__.py\n@@ -16,27 +16,22 @@\n \n import argparse\n \n-from .config import config_command, config_command_parser\n+from .config import config_command_parser\n from .config_args import default_config_file, load_config_from_file # noqa: F401\n-from .default import default_command_parser, default_config_command\n-\n-\n-def filter_command_args(args: dict, args_prefix: str):\n- \"Filters args while only keeping ones that are prefixed with `{args_prefix}.`\"\n- new_args = argparse.Namespace()\n- for key, value in vars(args).items():\n- if key.startswith(args_prefix):\n- setattr(new_args, key.replace(f\"{args_prefix}.\", \"\"), value)\n- return new_args\n+from .default import default_command_parser\n+from .update import update_command_parser\n \n \n def get_config_parser(subparsers=None):\n parent_parser = argparse.ArgumentParser(add_help=False)\n # The main config parser\n config_parser = config_command_parser(subparsers)\n+ # The subparser to add commands to\n+ subcommands = config_parser.add_subparsers(title=\"subcommands\", dest=\"subcommand\")\n \n # Then add other parsers with the parent parser\n- default_parser = default_command_parser(config_parser, parents=[parent_parser]) # noqa: F841\n+ default_command_parser(subcommands, parents=[parent_parser])\n+ update_command_parser(subcommands, parents=[parent_parser])\n \n return config_parser\n \n@@ -44,12 +39,13 @@ def get_config_parser(subparsers=None):\n def main():\n config_parser = get_config_parser()\n args = 
config_parser.parse_args()\n- if not args.default:\n- args = filter_command_args(args, \"config_args\")\n- config_command(args)\n- elif args.default:\n- args = filter_command_args(args, \"default_args\")\n- default_config_command(args)\n+\n+ if not hasattr(args, \"func\"):\n+ config_parser.print_help()\n+ exit(1)\n+\n+ # Run\n+ args.func(args)\n \n \n if __name__ == \"__main__\":\ndiff --git a/src/accelerate/commands/config/config.py b/src/accelerate/commands/config/config.py\nindex b504f07ad..72414f2ab 100644\n--- a/src/accelerate/commands/config/config.py\n+++ b/src/accelerate/commands/config/config.py\n@@ -21,13 +21,7 @@\n \n from .cluster import get_cluster_input\n from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\n-from .config_utils import ( # noqa: F401\n- GroupedAction,\n- SubcommandHelpFormatter,\n- _ask_field,\n- _ask_options,\n- _convert_compute_environment,\n-)\n+from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\n from .sagemaker import get_sagemaker_input\n \n \n@@ -49,18 +43,13 @@ def get_user_input():\n \n def config_command_parser(subparsers=None):\n if subparsers is not None:\n- parser = subparsers.add_parser(\"config\", description=description, formatter_class=SubcommandHelpFormatter)\n+ parser = subparsers.add_parser(\"config\", description=description)\n else:\n- parser = argparse.ArgumentParser(\n- \"Accelerate config command\", description=description, formatter_class=SubcommandHelpFormatter\n- )\n+ parser = argparse.ArgumentParser(\"Accelerate config command\", description=description)\n \n parser.add_argument(\n \"--config_file\",\n default=None,\n- dest=\"config_args.config_file\",\n- metavar=\"CONFIG_FILE\",\n- action=GroupedAction,\n help=(\n \"The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache \"\n \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n@@ -87,6 +76,7 @@ def config_command(args):\n config.to_json_file(config_file)\n else:\n config.to_yaml_file(config_file)\n+ print(f\"accelerate configuration saved at {config_file}\")\n \n \n def main():\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nindex 1deee9f32..736c3a3d5 100644\n--- a/src/accelerate/commands/config/config_utils.py\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -88,29 +88,11 @@ def _convert_yes_no_to_bool(value):\n return {\"yes\": True, \"no\": False}[value.lower()]\n \n \n-class GroupedAction(argparse.Action):\n- \"\"\"\n- Filters arguments into seperate namespace groups based on the first part of the argument name.\n- \"\"\"\n-\n- def __call__(self, parser, namespace, values, option_string=None):\n- group, dest = self.dest.split(\".\", 2)\n- groupspace = getattr(namespace, group, argparse.Namespace())\n- setattr(groupspace, dest, values)\n- setattr(namespace, group, groupspace)\n-\n-\n class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):\n \"\"\"\n A custom formatter that will remove the usage line from the help message for subcommands.\n \"\"\"\n \n- def _format_action(self, action):\n- parts = super()._format_action(action)\n- if action.nargs == argparse.PARSER:\n- parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n- return parts\n-\n def _format_usage(self, usage, actions, groups, prefix):\n usage = super()._format_usage(usage, actions, groups, prefix)\n usage = usage.replace(\"<command> [<args>] \", \"\")\ndiff --git a/src/accelerate/commands/config/default.py b/src/accelerate/commands/config/default.py\nindex b87a1d45b..f7ceb84ce 100644\n--- a/src/accelerate/commands/config/default.py\n+++ b/src/accelerate/commands/config/default.py\n@@ -14,13 +14,15 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import argparse\n from pathlib import Path\n \n import torch\n \n from .config_args import ClusterConfig, default_json_config_file\n-from .config_utils import GroupedAction\n+from .config_utils import SubcommandHelpFormatter\n+\n+\n+description = \"Create a default config file for Accelerate with only a few flags set.\"\n \n \n def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\n@@ -42,7 +44,7 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\n print(\n f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\n )\n- return\n+ return False\n mixed_precision = mixed_precision.lower()\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n raise ValueError(f\"`mixed_precision` should be one of 'no', 'fp16', or 'bf16'. 
Received {mixed_precision}\")\n@@ -64,20 +66,13 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\n config[\"use_cpu\"] = True\n config[\"num_processes\"] = 1\n config[\"distributed_type\"] = \"NO\"\n- if not path.exists():\n- config = ClusterConfig(**config)\n- config.to_json_file(path)\n+ config = ClusterConfig(**config)\n+ config.to_json_file(path)\n+ return path\n \n \n-description = \"Create a default config file for Accelerate with only a few flags set.\"\n-\n-\n-def default_command_parser(parser=None, parents=None):\n- if parser is None and parents is None:\n- parser = argparse.ArgumentParser(description=description)\n- else:\n- default_parser = parser.add_subparsers(title=\"subcommand {default}\", dest=\"default\", description=description)\n- parser = default_parser.add_parser(\"default\", parents=parents)\n+def default_command_parser(parser, parents):\n+ parser = parser.add_parser(\"default\", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)\n parser.add_argument(\n \"--config_file\",\n default=default_json_config_file,\n@@ -87,9 +82,7 @@ def default_command_parser(parser=None, parents=None):\n \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n \"with 'huggingface'.\"\n ),\n- dest=\"default_args.save_location\",\n- metavar=\"CONFIG_FILE\",\n- action=GroupedAction,\n+ dest=\"save_location\",\n )\n \n parser.add_argument(\n@@ -100,14 +93,12 @@ def default_command_parser(parser=None, parents=None):\n \"Choose between FP16 and BF16 (bfloat16) training. \"\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n default=\"no\",\n- dest=\"default_args.mixed_precision\",\n- action=GroupedAction,\n )\n parser.set_defaults(func=default_config_command)\n return parser\n \n \n def default_config_command(args):\n- args = vars(args)\n- args.pop(\"func\", None)\n- write_basic_config(**args)\n+ config_file = write_basic_config(args.mixed_precision, args.save_location)\n+ if config_file:\n+ print(f\"accelerate configuration saved at {config_file}\")\ndiff --git a/src/accelerate/commands/config/update.py b/src/accelerate/commands/config/update.py\nnew file mode 100644\nindex 000000000..5f025594b\n--- /dev/null\n+++ b/src/accelerate/commands/config/update.py\n@@ -0,0 +1,63 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from pathlib import Path\n+\n+from .config_args import default_config_file, load_config_from_file\n+from .config_utils import SubcommandHelpFormatter\n+\n+\n+description = \"Update an existing config file with the latest defaults while maintaining the old configuration.\"\n+\n+\n+def update_config(args):\n+ \"\"\"\n+ Update an existing config file with the latest defaults while maintaining the old configuration.\n+ \"\"\"\n+ config_file = args.config_file\n+ if config_file is None and Path(default_config_file).exists():\n+ config_file = default_config_file\n+ elif not Path(config_file).exists():\n+ raise ValueError(f\"The passed config file located at {config_file} doesn't exist.\")\n+ config = load_config_from_file(config_file)\n+\n+ if config_file.endswith(\".json\"):\n+ config.to_json_file(config_file)\n+ else:\n+ config.to_yaml_file(config_file)\n+ return config_file\n+\n+\n+def update_command_parser(parser, parents):\n+ parser = parser.add_parser(\"update\", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)\n+ parser.add_argument(\n+ \"--config_file\",\n+ default=None,\n+ help=(\n+ \"The path to the config file to update. Will default to a file named default_config.yaml in the cache \"\n+ \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n+ \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n+ \"with 'huggingface'.\"\n+ ),\n+ )\n+\n+ parser.set_defaults(func=update_config_command)\n+ return parser\n+\n+\n+def update_config_command(args):\n+ config_file = update_config(args)\n+ print(f\"Sucessfully updated the configuration file at {config_file}.\")\n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_855). All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/855", "pr_id": 1123457165 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex eafec816e..3d72aada6 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -46,7 +46,7 @@ Create a default config file for Accelerate with only a few flags set.\n **Usage**: \n \n ```bash\n-accelerate default-config [arguments]\n+accelerate config default [arguments]\n ```\n \n **Optional Arguments**:\n@@ -57,6 +57,28 @@ accelerate default-config [arguments]\n * `-h`, `--help` (`bool`) -- Show a help message and exit\n * `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. 
BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\n \n+## accelerate config update\n+\n+**Command**:\n+\n+`accelerate config update` or `accelerate-config update`\n+\n+Update an existing config file with the latest defaults while maintaining the old configuration.\n+\n+**Usage**: \n+\n+```bash\n+accelerate config update [arguments]\n+```\n+\n+**Optional Arguments**:\n+* `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content\n+ of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\n+\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\n+\n+\n ## accelerate env\n \n **Command**:\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\nindex ba9d6cf30..9b1545982 100644\n--- a/src/accelerate/commands/config/__init__.py\n+++ b/src/accelerate/commands/config/__init__.py\n@@ -16,27 +16,22 @@\n \n import argparse\n \n-from .config import config_command, config_command_parser\n+from .config import config_command_parser\n from .config_args import default_config_file, load_config_from_file # noqa: F401\n-from .default import default_command_parser, default_config_command\n-\n-\n-def filter_command_args(args: dict, args_prefix: str):\n- \"Filters args while only keeping ones that are prefixed with `{args_prefix}.`\"\n- new_args = argparse.Namespace()\n- for key, value in vars(args).items():\n- if key.startswith(args_prefix):\n- setattr(new_args, key.replace(f\"{args_prefix}.\", \"\"), value)\n- return new_args\n+from .default import default_command_parser\n+from .update import update_command_parser\n \n \n def get_config_parser(subparsers=None):\n parent_parser = argparse.ArgumentParser(add_help=False)\n # The main config parser\n config_parser = config_command_parser(subparsers)\n+ # The subparser to add commands to\n+ subcommands = config_parser.add_subparsers(title=\"subcommands\", dest=\"subcommand\")\n \n # Then add other parsers with the parent parser\n- default_parser = default_command_parser(config_parser, parents=[parent_parser]) # noqa: F841\n+ default_command_parser(subcommands, parents=[parent_parser])\n+ update_command_parser(subcommands, parents=[parent_parser])\n \n return config_parser\n \n@@ -44,12 +39,13 @@ def get_config_parser(subparsers=None):\n def main():\n config_parser = get_config_parser()\n args = config_parser.parse_args()\n- if not args.default:\n- args = filter_command_args(args, \"config_args\")\n- config_command(args)\n- elif args.default:\n- args = filter_command_args(args, \"default_args\")\n- default_config_command(args)\n+\n+ if not hasattr(args, \"func\"):\n+ config_parser.print_help()\n+ exit(1)\n+\n+ # Run\n+ args.func(args)\n \n \n if __name__ == \"__main__\":\ndiff --git a/src/accelerate/commands/config/config.py b/src/accelerate/commands/config/config.py\nindex b504f07ad..72414f2ab 100644\n--- a/src/accelerate/commands/config/config.py\n+++ b/src/accelerate/commands/config/config.py\n@@ -21,13 +21,7 @@\n \n from .cluster import get_cluster_input\n from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\n-from .config_utils import ( # noqa: F401\n- GroupedAction,\n- SubcommandHelpFormatter,\n- _ask_field,\n- _ask_options,\n- 
_convert_compute_environment,\n-)\n+from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\n from .sagemaker import get_sagemaker_input\n \n \n@@ -49,18 +43,13 @@ def get_user_input():\n \n def config_command_parser(subparsers=None):\n if subparsers is not None:\n- parser = subparsers.add_parser(\"config\", description=description, formatter_class=SubcommandHelpFormatter)\n+ parser = subparsers.add_parser(\"config\", description=description)\n else:\n- parser = argparse.ArgumentParser(\n- \"Accelerate config command\", description=description, formatter_class=SubcommandHelpFormatter\n- )\n+ parser = argparse.ArgumentParser(\"Accelerate config command\", description=description)\n \n parser.add_argument(\n \"--config_file\",\n default=None,\n- dest=\"config_args.config_file\",\n- metavar=\"CONFIG_FILE\",\n- action=GroupedAction,\n help=(\n \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\n \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n@@ -87,6 +76,7 @@ def config_command(args):\n config.to_json_file(config_file)\n else:\n config.to_yaml_file(config_file)\n+ print(f\"accelerate configuration saved at {config_file}\")\n \n \n def main():\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nindex 1deee9f32..736c3a3d5 100644\n--- a/src/accelerate/commands/config/config_utils.py\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -88,29 +88,11 @@ def _convert_yes_no_to_bool(value):\n return {\"yes\": True, \"no\": False}[value.lower()]\n \n \n-class GroupedAction(argparse.Action):\n- \"\"\"\n- Filters arguments into seperate namespace groups based on the first part of the argument name.\n- \"\"\"\n-\n- def __call__(self, parser, namespace, values, option_string=None):\n- group, dest = self.dest.split(\".\", 2)\n- groupspace = getattr(namespace, group, argparse.Namespace())\n- setattr(groupspace, dest, values)\n- setattr(namespace, group, groupspace)\n-\n-\n class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):\n \"\"\"\n A custom formatter that will remove the usage line from the help message for subcommands.\n \"\"\"\n \n- def _format_action(self, action):\n- parts = super()._format_action(action)\n- if action.nargs == argparse.PARSER:\n- parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n- return parts\n-\n def _format_usage(self, usage, actions, groups, prefix):\n usage = super()._format_usage(usage, actions, groups, prefix)\n usage = usage.replace(\"<command> [<args>] \", \"\")\ndiff --git a/src/accelerate/commands/config/default.py b/src/accelerate/commands/config/default.py\nindex b87a1d45b..f7ceb84ce 100644\n--- a/src/accelerate/commands/config/default.py\n+++ b/src/accelerate/commands/config/default.py\n@@ -14,13 +14,15 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import argparse\n from pathlib import Path\n \n import torch\n \n from .config_args import ClusterConfig, default_json_config_file\n-from .config_utils import GroupedAction\n+from .config_utils import SubcommandHelpFormatter\n+\n+\n+description = \"Create a default config file for Accelerate with only a few flags set.\"\n \n \n def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\n@@ -42,7 +44,7 @@ def write_basic_config(mixed_precision=\"no\", 
save_location: str = default_json_c\n print(\n f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\n )\n- return\n+ return False\n mixed_precision = mixed_precision.lower()\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n raise ValueError(f\"`mixed_precision` should be one of 'no', 'fp16', or 'bf16'. Received {mixed_precision}\")\n@@ -64,20 +66,13 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\n config[\"use_cpu\"] = True\n config[\"num_processes\"] = 1\n config[\"distributed_type\"] = \"NO\"\n- if not path.exists():\n- config = ClusterConfig(**config)\n- config.to_json_file(path)\n+ config = ClusterConfig(**config)\n+ config.to_json_file(path)\n+ return path\n \n \n-description = \"Create a default config file for Accelerate with only a few flags set.\"\n-\n-\n-def default_command_parser(parser=None, parents=None):\n- if parser is None and parents is None:\n- parser = argparse.ArgumentParser(description=description)\n- else:\n- default_parser = parser.add_subparsers(title=\"subcommand {default}\", dest=\"default\", description=description)\n- parser = default_parser.add_parser(\"default\", parents=parents)\n+def default_command_parser(parser, parents):\n+ parser = parser.add_parser(\"default\", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)\n parser.add_argument(\n \"--config_file\",\n default=default_json_config_file,\n@@ -87,9 +82,7 @@ def default_command_parser(parser=None, parents=None):\n \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n \"with 'huggingface'.\"\n ),\n- dest=\"default_args.save_location\",\n- metavar=\"CONFIG_FILE\",\n- action=GroupedAction,\n+ dest=\"save_location\",\n )\n \n parser.add_argument(\n@@ -100,14 +93,12 @@ def default_command_parser(parser=None, parents=None):\n \"Choose between FP16 and BF16 (bfloat16) training. \"\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n default=\"no\",\n- dest=\"default_args.mixed_precision\",\n- action=GroupedAction,\n )\n parser.set_defaults(func=default_config_command)\n return parser\n \n \n def default_config_command(args):\n- args = vars(args)\n- args.pop(\"func\", None)\n- write_basic_config(**args)\n+ config_file = write_basic_config(args.mixed_precision, args.save_location)\n+ if config_file:\n+ print(f\"accelerate configuration saved at {config_file}\")\ndiff --git a/src/accelerate/commands/config/update.py b/src/accelerate/commands/config/update.py\nnew file mode 100644\nindex 000000000..5f025594b\n--- /dev/null\n+++ b/src/accelerate/commands/config/update.py\n@@ -0,0 +1,63 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from pathlib import Path\n+\n+from .config_args import default_config_file, load_config_from_file\n+from .config_utils import SubcommandHelpFormatter\n+\n+\n+description = \"Update an existing config file with the latest defaults while maintaining the old configuration.\"\n+\n+\n+def update_config(args):\n+ \"\"\"\n+ Update an existing config file with the latest defaults while maintaining the old configuration.\n+ \"\"\"\n+ config_file = args.config_file\n+ if config_file is None and Path(default_config_file).exists():\n+ config_file = default_config_file\n+ elif not Path(config_file).exists():\n+ raise ValueError(f\"The passed config file located at {config_file} doesn't exist.\")\n+ config = load_config_from_file(config_file)\n+\n+ if config_file.endswith(\".json\"):\n+ config.to_json_file(config_file)\n+ else:\n+ config.to_yaml_file(config_file)\n+ return config_file\n+\n+\n+def update_command_parser(parser, parents):\n+ parser = parser.add_parser(\"update\", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)\n+ parser.add_argument(\n+ \"--config_file\",\n+ default=None,\n+ help=(\n+ \"The path to the config file to update. Will default to a file named default_config.yaml in the cache \"\n+ \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n+ \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n+ \"with 'huggingface'.\"\n+ ),\n+ )\n+\n+ parser.set_defaults(func=update_config_command)\n+ return parser\n+\n+\n+def update_config_command(args):\n+ config_file = update_config(args)\n+ print(f\"Sucessfully updated the configuration file at {config_file}.\")\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "The DeepSpeed CI failures are unrelated and look like a startup issue", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/853", "pr_id": 1123180567 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex 562e1f4a4..eafec816e 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -35,11 +35,11 @@ accelerate config [arguments]\n (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\n * `-h`, `--help` (`bool`) -- Show a help message and exit\n \n-## accelerate default-config \n+## accelerate config default\n \n **Command**:\n \n-`accelerate default-config` or `accelerate-default-config`\n+`accelerate config default` or `accelerate-config default`\n \n Create a default config file for Accelerate with only a few flags set.\n \ndiff --git a/setup.py b/setup.py\nindex 622d6de69..f96ebc440 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -48,7 +48,6 @@\n 
\"accelerate=accelerate.commands.accelerate_cli:main\",\n \"accelerate-config=accelerate.commands.config:main\",\n \"accelerate-launch=accelerate.commands.launch:main\",\n- \"accelerate-default-config=accelerate.commands.config.default:main\",\n ]\n },\n python_requires=\">=3.7.0\",\ndiff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\nindex 8ffda3572..7716526c5 100644\n--- a/src/accelerate/commands/accelerate_cli.py\n+++ b/src/accelerate/commands/accelerate_cli.py\n@@ -16,8 +16,7 @@\n \n from argparse import ArgumentParser\n \n-from accelerate.commands.config import config_command_parser\n-from accelerate.commands.config.default import default_command_parser\n+from accelerate.commands.config import get_config_parser\n from accelerate.commands.env import env_command_parser\n from accelerate.commands.launch import launch_command_parser\n from accelerate.commands.test import test_command_parser\n@@ -29,8 +28,7 @@ def main():\n subparsers = parser.add_subparsers(help=\"accelerate command helpers\")\n \n # Register commands\n- config_command_parser(subparsers=subparsers)\n- default_command_parser(subparsers=subparsers)\n+ get_config_parser(subparsers=subparsers)\n env_command_parser(subparsers=subparsers)\n launch_command_parser(subparsers=subparsers)\n tpu_command_parser(subparsers=subparsers)\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\nindex 2400555c4..ba9d6cf30 100644\n--- a/src/accelerate/commands/config/__init__.py\n+++ b/src/accelerate/commands/config/__init__.py\n@@ -15,73 +15,41 @@\n # limitations under the License.\n \n import argparse\n-import os\n \n-from accelerate.utils import ComputeEnvironment\n+from .config import config_command, config_command_parser\n+from .config_args import default_config_file, load_config_from_file # noqa: F401\n+from .default import default_command_parser, default_config_command\n \n-from .cluster import get_cluster_input\n-from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\n-from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\n-from .sagemaker import get_sagemaker_input\n \n+def filter_command_args(args: dict, args_prefix: str):\n+ \"Filters args while only keeping ones that are prefixed with `{args_prefix}.`\"\n+ new_args = argparse.Namespace()\n+ for key, value in vars(args).items():\n+ if key.startswith(args_prefix):\n+ setattr(new_args, key.replace(f\"{args_prefix}.\", \"\"), value)\n+ return new_args\n \n-description = \"Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. 
Should always be ran first on your machine\"\n \n+def get_config_parser(subparsers=None):\n+ parent_parser = argparse.ArgumentParser(add_help=False)\n+ # The main config parser\n+ config_parser = config_command_parser(subparsers)\n \n-def get_user_input():\n- compute_environment = _ask_options(\n- \"In which compute environment are you running?\",\n- [\"This machine\", \"AWS (Amazon SageMaker)\"],\n- _convert_compute_environment,\n- )\n- if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\n- config = get_sagemaker_input()\n- else:\n- config = get_cluster_input()\n- return config\n+ # Then add other parsers with the parent parser\n+ default_parser = default_command_parser(config_parser, parents=[parent_parser]) # noqa: F841\n \n-\n-def config_command_parser(subparsers=None):\n- if subparsers is not None:\n- parser = subparsers.add_parser(\"config\", description=description)\n- else:\n- parser = argparse.ArgumentParser(\"Accelerate config command\", description=description)\n-\n- parser.add_argument(\n- \"--config_file\",\n- default=None,\n- help=(\n- \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\n- \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n- \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n- \"with 'huggingface'.\"\n- ),\n- )\n-\n- if subparsers is not None:\n- parser.set_defaults(func=config_command)\n- return parser\n-\n-\n-def config_command(args):\n- config = get_user_input()\n- if args.config_file is not None:\n- config_file = args.config_file\n- else:\n- if not os.path.isdir(cache_dir):\n- os.makedirs(cache_dir)\n- config_file = default_yaml_config_file\n-\n- if config_file.endswith(\".json\"):\n- config.to_json_file(config_file)\n- else:\n- config.to_yaml_file(config_file)\n+ return config_parser\n \n \n def main():\n- parser = config_command_parser()\n- args = parser.parse_args()\n- config_command(args)\n+ config_parser = get_config_parser()\n+ args = config_parser.parse_args()\n+ if not args.default:\n+ args = filter_command_args(args, \"config_args\")\n+ config_command(args)\n+ elif args.default:\n+ args = filter_command_args(args, \"default_args\")\n+ default_config_command(args)\n \n \n if __name__ == \"__main__\":\ndiff --git a/src/accelerate/commands/config/config.py b/src/accelerate/commands/config/config.py\nnew file mode 100644\nindex 000000000..b504f07ad\n--- /dev/null\n+++ b/src/accelerate/commands/config/config.py\n@@ -0,0 +1,99 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+\n+from accelerate.utils import ComputeEnvironment\n+\n+from .cluster import get_cluster_input\n+from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\n+from .config_utils import ( # noqa: F401\n+ GroupedAction,\n+ SubcommandHelpFormatter,\n+ _ask_field,\n+ _ask_options,\n+ _convert_compute_environment,\n+)\n+from .sagemaker import get_sagemaker_input\n+\n+\n+description = \"Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine\"\n+\n+\n+def get_user_input():\n+ compute_environment = _ask_options(\n+ \"In which compute environment are you running?\",\n+ [\"This machine\", \"AWS (Amazon SageMaker)\"],\n+ _convert_compute_environment,\n+ )\n+ if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\n+ config = get_sagemaker_input()\n+ else:\n+ config = get_cluster_input()\n+ return config\n+\n+\n+def config_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"config\", description=description, formatter_class=SubcommandHelpFormatter)\n+ else:\n+ parser = argparse.ArgumentParser(\n+ \"Accelerate config command\", description=description, formatter_class=SubcommandHelpFormatter\n+ )\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ default=None,\n+ dest=\"config_args.config_file\",\n+ metavar=\"CONFIG_FILE\",\n+ action=GroupedAction,\n+ help=(\n+ \"The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache \"\n+ \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n+ \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n+ \"with 'huggingface'.\"\n+ ),\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=config_command)\n+ return parser\n+\n+\n+def config_command(args):\n+ config = get_user_input()\n+ if args.config_file is not None:\n+ config_file = args.config_file\n+ else:\n+ if not os.path.isdir(cache_dir):\n+ os.makedirs(cache_dir)\n+ config_file = default_yaml_config_file\n+\n+ if config_file.endswith(\".json\"):\n+ config.to_json_file(config_file)\n+ else:\n+ config.to_yaml_file(config_file)\n+\n+\n+def main():\n+ parser = config_command_parser()\n+ args = parser.parse_args()\n+ config_command(args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nindex def29fe06..1deee9f32 100644\n--- a/src/accelerate/commands/config/config_utils.py\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -14,6 +14,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import argparse\n+\n from ...utils.dataclasses import (\n ComputeEnvironment,\n DistributedType,\n@@ -84,3 +86,32 @@ def _convert_sagemaker_distributed_mode(value):\n \n def _convert_yes_no_to_bool(value):\n return {\"yes\": True, \"no\": False}[value.lower()]\n+\n+\n+class GroupedAction(argparse.Action):\n+ \"\"\"\n+ Filters arguments into seperate namespace groups based on the first part of the argument name.\n+ \"\"\"\n+\n+ def __call__(self, parser, namespace, values, option_string=None):\n+ group, dest = self.dest.split(\".\", 2)\n+ groupspace = getattr(namespace, group, argparse.Namespace())\n+ setattr(groupspace, dest, values)\n+ setattr(namespace, group, groupspace)\n+\n+\n+class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):\n+ \"\"\"\n+ A custom formatter that will remove the usage line from the help message for subcommands.\n+ \"\"\"\n+\n+ def _format_action(self, action):\n+ parts = super()._format_action(action)\n+ if action.nargs == argparse.PARSER:\n+ parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n+ return parts\n+\n+ def _format_usage(self, usage, actions, groups, prefix):\n+ usage = super()._format_usage(usage, actions, groups, prefix)\n+ usage = usage.replace(\"<command> [<args>] \", \"\")\n+ return usage\ndiff --git a/src/accelerate/commands/config/default.py b/src/accelerate/commands/config/default.py\nindex 956c3d719..b87a1d45b 100644\n--- a/src/accelerate/commands/config/default.py\n+++ b/src/accelerate/commands/config/default.py\n@@ -15,21 +15,69 @@\n # limitations under the License.\n \n import argparse\n-\n-from accelerate.utils import write_basic_config\n-\n-from .config_args import default_json_config_file\n+from pathlib import Path\n+\n+import torch\n+\n+from .config_args import ClusterConfig, default_json_config_file\n+from .config_utils import GroupedAction\n+\n+\n+def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\n+ \"\"\"\n+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. 
Will also\n+ set CPU if it is a CPU-only machine.\n+\n+ Args:\n+ mixed_precision (`str`, *optional*, defaults to \"no\"):\n+ Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\n+ save_location (`str`, *optional*, defaults to `default_json_config_file`):\n+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default\n+ location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting\n+ the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.\n+ \"\"\"\n+ path = Path(save_location)\n+ path.parent.mkdir(parents=True, exist_ok=True)\n+ if path.exists():\n+ print(\n+ f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\n+ )\n+ return\n+ mixed_precision = mixed_precision.lower()\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ raise ValueError(f\"`mixed_precision` should be one of 'no', 'fp16', or 'bf16'. Received {mixed_precision}\")\n+ config = {\n+ \"compute_environment\": \"LOCAL_MACHINE\",\n+ \"mixed_precision\": mixed_precision,\n+ \"dynamo_backend\": dynamo_backend,\n+ }\n+ if torch.cuda.is_available():\n+ num_gpus = torch.cuda.device_count()\n+ config[\"num_processes\"] = num_gpus\n+ config[\"use_cpu\"] = False\n+ if num_gpus > 1:\n+ config[\"distributed_type\"] = \"MULTI_GPU\"\n+ else:\n+ config[\"distributed_type\"] = \"NO\"\n+ else:\n+ num_gpus = 0\n+ config[\"use_cpu\"] = True\n+ config[\"num_processes\"] = 1\n+ config[\"distributed_type\"] = \"NO\"\n+ if not path.exists():\n+ config = ClusterConfig(**config)\n+ config.to_json_file(path)\n \n \n description = \"Create a default config file for Accelerate with only a few flags set.\"\n \n \n-def default_command_parser(subparsers=None):\n- if subparsers is not None:\n- parser = subparsers.add_parser(\"default-config\", description=description)\n+def default_command_parser(parser=None, parents=None):\n+ if parser is None and parents is None:\n+ parser = argparse.ArgumentParser(description=description)\n else:\n- parser = argparse.ArgumentParser(\"Accelerate default-config command\", description=description)\n-\n+ default_parser = parser.add_subparsers(title=\"subcommand {default}\", dest=\"default\", description=description)\n+ parser = default_parser.add_parser(\"default\", parents=parents)\n parser.add_argument(\n \"--config_file\",\n default=default_json_config_file,\n@@ -39,7 +87,9 @@ def default_command_parser(subparsers=None):\n \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n \"with 'huggingface'.\"\n ),\n- dest=\"save_location\",\n+ dest=\"default_args.save_location\",\n+ metavar=\"CONFIG_FILE\",\n+ action=GroupedAction,\n )\n \n parser.add_argument(\n@@ -50,24 +100,14 @@ def default_command_parser(subparsers=None):\n \"Choose between FP16 and BF16 (bfloat16) training. 
\"\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n default=\"no\",\n+ dest=\"default_args.mixed_precision\",\n+ action=GroupedAction,\n )\n-\n- if subparsers is not None:\n- parser.set_defaults(func=config_command)\n+ parser.set_defaults(func=default_config_command)\n return parser\n \n \n-def config_command(args):\n+def default_config_command(args):\n args = vars(args)\n args.pop(\"func\", None)\n write_basic_config(**args)\n-\n-\n-def main():\n- parser = default_command_parser()\n- args = parser.parse_args()\n- config_command(args)\n-\n-\n-if __name__ == \"__main__\":\n- main()\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\nindex f1af373b6..a9b6f9d4b 100644\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -14,12 +14,10 @@\n \n import os\n from contextlib import contextmanager\n-from pathlib import Path\n \n import torch\n \n-from ..commands.config.cluster import ClusterConfig\n-from ..commands.config.config_args import default_json_config_file\n+from ..commands.config.default import write_basic_config # noqa: F401\n from ..state import AcceleratorState\n from .dataclasses import DistributedType\n from .imports import is_deepspeed_available, is_tpu_available\n@@ -113,49 +111,3 @@ def get_pretty_name(obj):\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)\n-\n-\n-def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\n- \"\"\"\n- Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\n- set CPU if it is a CPU-only machine.\n-\n- Args:\n- mixed_precision (`str`, *optional*, defaults to \"no\"):\n- Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\n- save_location (`str`, *optional*, defaults to `default_json_config_file`):\n- Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default\n- location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting\n- the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.\n- \"\"\"\n- path = Path(save_location)\n- path.parent.mkdir(parents=True, exist_ok=True)\n- if path.exists():\n- print(\n- f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\n- )\n- return\n- mixed_precision = mixed_precision.lower()\n- if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n- raise ValueError(f\"`mixed_precision` should be one of 'no', 'fp16', or 'bf16'. 
Received {mixed_precision}\")\n- config = {\n- \"compute_environment\": \"LOCAL_MACHINE\",\n- \"mixed_precision\": mixed_precision,\n- \"dynamo_backend\": dynamo_backend,\n- }\n- if torch.cuda.is_available():\n- num_gpus = torch.cuda.device_count()\n- config[\"num_processes\"] = num_gpus\n- config[\"use_cpu\"] = False\n- if num_gpus > 1:\n- config[\"distributed_type\"] = \"MULTI_GPU\"\n- else:\n- config[\"distributed_type\"] = \"NO\"\n- else:\n- num_gpus = 0\n- config[\"use_cpu\"] = True\n- config[\"num_processes\"] = 1\n- config[\"distributed_type\"] = \"NO\"\n- if not path.exists():\n- config = ClusterConfig(**config)\n- config.to_json_file(path)\n", "code_comments": [ { "body": "```suggestion\r\n```", "diff_hunk": "@@ -15,73 +15,42 @@\n # limitations under the License.\n \n import argparse\n-import os\n \n-from accelerate.utils import ComputeEnvironment\n+from .config import config_command, config_command_parser\n+from .config_args import default_config_file, load_config_from_file # noqa: F401\n+from .default import default_command_parser, default_config_command\n \n-from .cluster import get_cluster_input\n-from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\n-from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\n-from .sagemaker import get_sagemaker_input\n \n+def filter_command_args(args: dict, args_prefix: str):\n+ \"Filters args while only keeping ones that are prefixed with `{args_prefix}.`\"\n+ new_args = argparse.Namespace()\n+ for key, value in vars(args).items():\n+ print(key, value)", "from_author": false }, { "body": "is this required here anymore?", "diff_hunk": "@@ -14,12 +14,10 @@\n \n import os\n from contextlib import contextmanager\n-from pathlib import Path\n \n import torch\n \n-from ..commands.config.cluster import ClusterConfig\n-from ..commands.config.config_args import default_json_config_file\n+from ..commands.config.default import write_basic_config # noqa: F401", "from_author": false }, { "body": "Yep, this is the nit about making sure we can keep `from accelerate.utils import write_basic_config` ", "diff_hunk": "@@ -14,12 +14,10 @@\n \n import os\n from contextlib import contextmanager\n-from pathlib import Path\n \n import torch\n \n-from ..commands.config.cluster import ClusterConfig\n-from ..commands.config.config_args import default_json_config_file\n+from ..commands.config.default import write_basic_config # noqa: F401", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/851", "pr_id": 1121918243 }, { "diff": "diff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\nindex 835d4e0d9..2652a2105 100644\n--- a/src/accelerate/scheduler.py\n+++ b/src/accelerate/scheduler.py\n@@ -69,8 +69,9 @@ def step(self, *args, **kwargs):\n num_processes = AcceleratorState().num_processes\n for _ in range(num_processes):\n # Special case when using OneCycle and `drop_last` was not used\n- if hasattr(self.scheduler, \"total_steps\") and self.scheduler._step_count <= self.scheduler.total_steps:\n- self.scheduler.step(*args, **kwargs)\n+ if hasattr(self.scheduler, \"total_steps\"):\n+ if self.scheduler._step_count <= self.scheduler.total_steps:\n+ self.scheduler.step(*args, **kwargs)\n else:\n self.scheduler.step(*args, **kwargs)\n \n", "code_comments": [], "context": [ { "body": "_The 
documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/849", "pr_id": 1121455495 }, { "diff": "diff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 880981594..bca55aa87 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -230,6 +230,7 @@ def training_function(config, args):\n accelerator.save_state(output_dir)\n model.eval()\n accurate = 0\n+ num_elems = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n@@ -239,9 +240,10 @@ def training_function(config, args):\n predictions = outputs.argmax(dim=-1)\n predictions, references = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\n accurate_preds = predictions == references\n+ num_elems += accurate_preds.shape[0]\n accurate += accurate_preds.long().sum()\n \n- eval_metric = accurate.item() / accelerator.gradient_state.samples_seen\n+ eval_metric = accurate.item() / num_elems\n # Use accelerator.print to print only on the main process.\n accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n if args.with_tracking:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/848", "pr_id": 1121430707 }, { "diff": "diff --git a/docs/source/package_reference/logging.mdx b/docs/source/package_reference/logging.mdx\nindex 675af41ee..85e844690 100644\n--- a/docs/source/package_reference/logging.mdx\n+++ b/docs/source/package_reference/logging.mdx\n@@ -21,4 +21,14 @@ To utilize this replace cases of `logging` with `accelerate.logging`:\n + logger = get_logger(__name__)\n ```\n \n+## Setting the log level\n+\n+The log level can be set with the `ACCELERATE_LOG_LEVEL` environment variable or by passing \n+`log_level` to `get_logger`:\n+```python\n+from accelerate.logging import get_logger\n+\n+logger = get_logger(__name__, log_level=\"INFO\")\n+```\n+\n [[autodoc]] logging.get_logger\n\\ No newline at end of file\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex f595c08cc..7a9ac92f4 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -207,11 +207,6 @@ def __init__(\n dynamo_backend: Union[DynamoBackend, str] = None,\n ):\n self.logging_dir = logging_dir\n- trackers = filter_trackers(log_with, self.logging_dir)\n- if len(trackers) < 1 and log_with is not None:\n- warnings.warn(f\"`log_with={log_with}` was passed but no supported trackers are currently installed.\")\n- self.log_with = trackers\n-\n if mixed_precision is not None:\n mixed_precision = str(mixed_precision)\n if mixed_precision not in PrecisionType:\n@@ -303,6 +298,11 @@ def __init__(\n **kwargs,\n )\n \n+ trackers = filter_trackers(log_with, self.logging_dir)\n+ if len(trackers) < 1 and log_with is not None:\n+ warnings.warn(f\"`log_with={log_with}` was passed but no supported trackers are currently installed.\")\n+ self.log_with = trackers\n+\n if (\n (mixed_precision != \"bf16\")\n and getattr(self.state, \"downcast_bfloat\", False)\ndiff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\nindex dca093215..46118f7c6 100644\n--- a/src/accelerate/logging.py\n+++ b/src/accelerate/logging.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import logging\n+import os\n \n from .state import AcceleratorState\n 
from .utils import DistributedType\n@@ -49,7 +50,7 @@ def log(self, level, msg, *args, **kwargs):\n self.logger.log(level, msg, *args, **kwargs)\n \n \n-def get_logger(name: str):\n+def get_logger(name: str, log_level: str = None):\n \"\"\"\n Returns a `logging.Logger` for `name` that can handle multiprocessing.\n \n@@ -58,6 +59,8 @@ def get_logger(name: str):\n Args:\n name (`str`):\n The name for the logger, such as `__file__`\n+ log_level (`str`, *optional*):\n+ The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not\n \n Example:\n \n@@ -68,7 +71,14 @@ def get_logger(name: str):\n \n >>> logger.info(\"My log\", main_process_only=False)\n >>> logger.debug(\"My log\", main_process_only=True)\n+\n+ >>> logger = get_logger(__name__, accelerate_log_level=\"DEBUG\")\n+ >>> logger.info(\"My log\")\n+ >>> logger.debug(\"My second log\")\n ```\n \"\"\"\n+ if log_level is None:\n+ log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", \"INFO\")\n logger = logging.getLogger(name)\n+ logging.basicConfig(level=log_level.upper())\n return MultiProcessAdapter(logger, {})\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 3a31fed93..689ee5033 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -577,7 +577,6 @@ def filter_trackers(\n if log_with is not None:\n if not isinstance(log_with, (list, tuple)):\n log_with = [log_with]\n- logger.debug(f\"{log_with}\")\n if \"all\" in log_with or LoggerType.ALL in log_with:\n loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n else:\n", "code_comments": [ { "body": "Is this a standard name that comes from `logging`? Should we also look for an `ACCELERATE_LOG_LEVEL` if users want to use a different one from the standard one? (So priority would be to look for the accelerate log level, then the general one).", "diff_hunk": "@@ -21,4 +21,14 @@ To utilize this replace cases of `logging` with `accelerate.logging`:\n + logger = get_logger(__name__)\n ```\n \n+## Setting the log level\n+\n+The log level can be set with the `LOG_LEVEL` environment variable or by passing ", "from_author": false }, { "body": "```suggestion\r\n log_level (`str`, *optional*):\r\n```\r\nMake sure to re-read the doc-writing guide ;-)", "diff_hunk": "@@ -58,6 +59,8 @@ def get_logger(name: str):\n Args:\n name (`str`):\n The name for the logger, such as `__file__`\n+ log_level (`str`, `optional`, defaults to `None`):", "from_author": false }, { "body": "There is not, generally I've seen it as `LOGLEVEL` for the recommended usage but then it's advised to pull it in from `os.environ`. 
However you're right having something Accelerate specific would be much better so I will opt for `ACCELERATE_LOG_LEVEL` instead", "diff_hunk": "@@ -21,4 +21,14 @@ To utilize this replace cases of `logging` with `accelerate.logging`:\n + logger = get_logger(__name__)\n ```\n \n+## Setting the log level\n+\n+The log level can be set with the `LOG_LEVEL` environment variable or by passing ", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/842", "pr_id": 1118288220 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex 9643d3b4f..562e1f4a4 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -35,6 +35,28 @@ accelerate config [arguments]\n (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\n * `-h`, `--help` (`bool`) -- Show a help message and exit\n \n+## accelerate default-config \n+\n+**Command**:\n+\n+`accelerate default-config` or `accelerate-default-config`\n+\n+Create a default config file for Accelerate with only a few flags set.\n+\n+**Usage**: \n+\n+```bash\n+accelerate default-config [arguments]\n+```\n+\n+**Optional Arguments**:\n+* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content\n+ of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\n+\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\n+* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. 
BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\n+\n ## accelerate env\n \n **Command**:\ndiff --git a/setup.py b/setup.py\nindex f96ebc440..622d6de69 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -48,6 +48,7 @@\n \"accelerate=accelerate.commands.accelerate_cli:main\",\n \"accelerate-config=accelerate.commands.config:main\",\n \"accelerate-launch=accelerate.commands.launch:main\",\n+ \"accelerate-default-config=accelerate.commands.config.default:main\",\n ]\n },\n python_requires=\">=3.7.0\",\ndiff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\nindex f0e76fd2c..8ffda3572 100644\n--- a/src/accelerate/commands/accelerate_cli.py\n+++ b/src/accelerate/commands/accelerate_cli.py\n@@ -17,6 +17,7 @@\n from argparse import ArgumentParser\n \n from accelerate.commands.config import config_command_parser\n+from accelerate.commands.config.default import default_command_parser\n from accelerate.commands.env import env_command_parser\n from accelerate.commands.launch import launch_command_parser\n from accelerate.commands.test import test_command_parser\n@@ -29,6 +30,7 @@ def main():\n \n # Register commands\n config_command_parser(subparsers=subparsers)\n+ default_command_parser(subparsers=subparsers)\n env_command_parser(subparsers=subparsers)\n launch_command_parser(subparsers=subparsers)\n tpu_command_parser(subparsers=subparsers)\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\nindex 4b02ac40c..2400555c4 100644\n--- a/src/accelerate/commands/config/__init__.py\n+++ b/src/accelerate/commands/config/__init__.py\n@@ -25,6 +25,9 @@\n from .sagemaker import get_sagemaker_input\n \n \n+description = \"Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine\"\n+\n+\n def get_user_input():\n compute_environment = _ask_options(\n \"In which compute environment are you running?\",\n@@ -40,9 +43,9 @@ def get_user_input():\n \n def config_command_parser(subparsers=None):\n if subparsers is not None:\n- parser = subparsers.add_parser(\"config\")\n+ parser = subparsers.add_parser(\"config\", description=description)\n else:\n- parser = argparse.ArgumentParser(\"Accelerate config command\")\n+ parser = argparse.ArgumentParser(\"Accelerate config command\", description=description)\n \n parser.add_argument(\n \"--config_file\",\ndiff --git a/src/accelerate/commands/config/default.py b/src/accelerate/commands/config/default.py\nnew file mode 100644\nindex 000000000..956c3d719\n--- /dev/null\n+++ b/src/accelerate/commands/config/default.py\n@@ -0,0 +1,73 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+\n+from accelerate.utils import write_basic_config\n+\n+from .config_args import default_json_config_file\n+\n+\n+description = \"Create a default config file for Accelerate with only a few flags set.\"\n+\n+\n+def default_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"default-config\", description=description)\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate default-config command\", description=description)\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ default=default_json_config_file,\n+ help=(\n+ \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\n+ \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n+ \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n+ \"with 'huggingface'.\"\n+ ),\n+ dest=\"save_location\",\n+ )\n+\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ type=str,\n+ help=\"Whether or not to use mixed precision training. \"\n+ \"Choose between FP16 and BF16 (bfloat16) training. \"\n+ \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n+ default=\"no\",\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=config_command)\n+ return parser\n+\n+\n+def config_command(args):\n+ args = vars(args)\n+ args.pop(\"func\", None)\n+ write_basic_config(**args)\n+\n+\n+def main():\n+ parser = default_command_parser()\n+ args = parser.parse_args()\n+ config_command(args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n", "code_comments": [ { "body": "I wouldn't include this in the default.", "diff_hunk": "@@ -0,0 +1,82 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+\n+from accelerate.utils import write_basic_config\n+\n+from .config_utils import DYNAMO_BACKENDS\n+\n+\n+description = \"Create a default config file for Accelerate with only a few flags set.\"\n+\n+\n+def default_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"default-config\", description=description)\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate default-config command\", description=description)\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ default=None,\n+ help=(\n+ \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\n+ \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n+ \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n+ \"with 'huggingface'.\"\n+ ),\n+ dest=\"save_location\",\n+ )\n+\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ type=str,\n+ help=\"Whether or not to use mixed precision training. \"\n+ \"Choose between FP16 and BF16 (bfloat16) training. \"\n+ \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n+ default=\"no\",\n+ )\n+\n+ parser.add_argument(\n+ \"--dynamo_backend\",\n+ type=str,\n+ choices=[\"no\"] + [b.lower() for b in DYNAMO_BACKENDS],\n+ help=\"Choose a backend to optimize your training with dynamo, see more at \"\n+ \"https://github.com/pytorch/torchdynamo.\",\n+ default=\"no\",\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=config_command)\n+ return parser", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/840", "pr_id": 1118231769 }, { "diff": "diff --git a/src/accelerate/commands/menu/helpers.py b/src/accelerate/commands/menu/helpers.py\nindex 2cc2ece3f..687f05c50 100644\n--- a/src/accelerate/commands/menu/helpers.py\n+++ b/src/accelerate/commands/menu/helpers.py\n@@ -37,6 +37,10 @@ def forceWrite(content, end=\"\"):\n sys.stdout.flush()\n \n \n+def writeColor(content, color, end=\"\"):\n+ forceWrite(f\"\\u001b[{color}m{content}\\u001b[0m\", end)\n+\n+\n def reset_cursor():\n forceWrite(\"\\r\")\n \ndiff --git a/src/accelerate/commands/menu/selection_menu.py b/src/accelerate/commands/menu/selection_menu.py\nindex d4af34c8f..751f6818a 100644\n--- a/src/accelerate/commands/menu/selection_menu.py\n+++ b/src/accelerate/commands/menu/selection_menu.py\n@@ -16,7 +16,7 @@\n Main driver for the selection menu, based on https://github.com/bchao1/bullet\n \"\"\"\n from . 
import cursor, input\n-from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor\n+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor\n from .keymap import KEYMAP\n \n \n@@ -34,7 +34,8 @@ def __init__(self, prompt: str = None, choices: list = []):\n def print_choice(self, index: int):\n \"Prints the choice at the given index\"\n if index == self.position:\n- forceWrite(f\" βž” {self.choices[index]}\")\n+ forceWrite(\" βž” \")\n+ writeColor(self.choices[index], 32)\n else:\n forceWrite(f\" {self.choices[index]}\")\n reset_cursor()\n@@ -108,5 +109,6 @@ def run(self, default_choice: int = 0):\n for _ in range(len(self.choices) + 1):\n move_cursor(1, \"UP\")\n clear_line()\n- forceWrite(f\" * {self.choices[choice]}\", \"\\n\")\n+ forceWrite(\" βž” \")\n+ writeColor(self.choices[choice], 32, \"\\n\")\n return choice\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/839", "pr_id": 1117850136 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 15996bfd0..a653b9d6a 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -164,14 +164,13 @@ def get_cluster_input():\n default=2,\n )\n \n+ deepspeed_devices = [\"none\", \"cpu\", \"nvme\"]\n if deepspeed_config[\"zero_stage\"] >= 2:\n deepspeed_config[\"offload_optimizer_device\"] = _ask_options(\n- \"Where to offload optimizer states?\",\n- [\"none\", \"cpu\", \"nvme\"],\n+ \"Where to offload optimizer states?\", deepspeed_devices, lambda x: deepspeed_devices[int(x)]\n )\n deepspeed_config[\"offload_param_device\"] = _ask_options(\n- \"Where to offload parameters?\",\n- [\"none\", \"cpu\", \"nvme\"],\n+ \"Where to offload parameters?\", deepspeed_devices, lambda x: deepspeed_devices[int(x)]\n )\n deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\n \"How many gradient accumulation steps you're passing in your script? [1]: \",\n@@ -294,7 +293,6 @@ def get_cluster_input():\n default=1e8,\n )\n fsdp_backward_prefetch_query = \"What should be your FSDP's backward prefetch policy?\"\n- fsdp_backward_prefetch_query = fsdp_backward_prefetch_query[:-2] + \")? [0]: \"\n fsdp_config[\"fsdp_backward_prefetch_policy\"] = _ask_options(\n fsdp_backward_prefetch_query,\n FSDP_BACKWARD_PREFETCH,\n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_836). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/836", "pr_id": 1117172405 }, { "diff": "diff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\nindex 11719835a..4b02ac40c 100644\n--- a/src/accelerate/commands/config/__init__.py\n+++ b/src/accelerate/commands/config/__init__.py\n@@ -21,15 +21,15 @@\n \n from .cluster import get_cluster_input\n from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\n-from .config_utils import _ask_field, _convert_compute_environment\n+from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401\n from .sagemaker import get_sagemaker_input\n \n \n def get_user_input():\n- compute_environment = _ask_field(\n- \"In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): \",\n+ compute_environment = _ask_options(\n+ \"In which compute environment are you running?\",\n+ [\"This machine\", \"AWS (Amazon SageMaker)\"],\n _convert_compute_environment,\n- error_message=\"Please enter 0 or 1\",\n )\n if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\n config = get_sagemaker_input()\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 5f032debc..15996bfd0 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -31,14 +31,21 @@\n FSDP_STATE_DICT_TYPE,\n )\n from .config_args import ClusterConfig\n-from .config_utils import _ask_field, _convert_distributed_mode, _convert_dynamo_backend, _convert_yes_no_to_bool\n+from .config_utils import (\n+ _ask_field,\n+ _ask_options,\n+ _convert_distributed_mode,\n+ _convert_dynamo_backend,\n+ _convert_mixed_precision,\n+ _convert_yes_no_to_bool,\n+)\n \n \n def get_cluster_input():\n- distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU [4] MPS): \",\n+ distributed_type = _ask_options(\n+ \"Which type of machine are you using?\",\n+ [\"No distributed training\", \"multi-CPU\", \"multi-GPU\", \"TPU\", \"MPS\"],\n _convert_distributed_mode,\n- error_message=\"Please enter 0, 1, 2, 3 or 4.\",\n )\n \n machine_rank = 0\n@@ -60,10 +67,10 @@ def get_cluster_input():\n default=1,\n )\n if num_machines > 1:\n- machine_rank = _ask_field(\n- \"What is the rank of this machine (from 0 to the number of machines - 1 )? [0]: \",\n+ machine_rank = _ask_options(\n+ \"What is the rank of this machine?\",\n+ list(range(num_machines)),\n lambda x: int(x),\n- default=0,\n )\n main_process_ip = _ask_field(\n \"What is the IP address of the machine that will host the main process? \",\n@@ -102,11 +109,22 @@ def get_cluster_input():\n error_message=\"Please enter yes or no.\",\n )\n if use_dynamo:\n- dynamo_backend = _ask_field(\n- \"Which dynamo backend would you like to use? 
([0] eager, [1] aot_eager, [2] inductor, [3] nvfuser, [5] aot_nvfuser, [6] aot_cudagraphs, [7] ofi, [8] onnxrt, [9] ipex) [2]: \",\n+ dynamo_backend = _ask_options(\n+ \"Which dynamo backend would you like to use?\",\n+ [\n+ \"eager\",\n+ \"aot_eager\",\n+ \"inductor\",\n+ \"nvfuser\",\n+ \"aot_nvfuser\",\n+ \"aot_cudagraphs\",\n+ \"ofi\",\n+ \"fx2trt\",\n+ \"onnxrt\",\n+ \"ipex\",\n+ ],\n _convert_dynamo_backend,\n- default=DynamoBackend.INDUCTOR,\n- error_message=\"Please enter 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9.\",\n+ default=2,\n )\n else:\n dynamo_backend = DynamoBackend.NO\n@@ -139,22 +157,21 @@ def get_cluster_input():\n default=\"none\",\n )\n else:\n- deepspeed_config[\"zero_stage\"] = _ask_field(\n- \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\n+ deepspeed_config[\"zero_stage\"] = _ask_options(\n+ \"What should be your DeepSpeed's ZeRO optimization stage?\",\n+ [0, 1, 2, 3],\n lambda x: int(x),\n default=2,\n )\n \n if deepspeed_config[\"zero_stage\"] >= 2:\n- deepspeed_config[\"offload_optimizer_device\"] = _ask_field(\n- \"Where to offload optimizer states? [none/cpu/nvme]: \",\n- lambda x: str(x),\n- default=\"none\",\n+ deepspeed_config[\"offload_optimizer_device\"] = _ask_options(\n+ \"Where to offload optimizer states?\",\n+ [\"none\", \"cpu\", \"nvme\"],\n )\n- deepspeed_config[\"offload_param_device\"] = _ask_field(\n- \"Where to offload parameters? [none/cpu/nvme]: \",\n- lambda x: str(x),\n- default=\"none\",\n+ deepspeed_config[\"offload_param_device\"] = _ask_options(\n+ \"Where to offload parameters?\",\n+ [\"none\", \"cpu\", \"nvme\"],\n )\n deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\n \"How many gradient accumulation steps you're passing in your script? [1]: \",\n@@ -194,14 +211,11 @@ def get_cluster_input():\n )\n \n if num_machines > 1:\n- launcher_query = \"Which Type of launcher do you want to use \"\n- for i, launcher in enumerate(DEEPSPEED_MULTINODE_LAUNCHERS):\n- launcher_query += f\"[{i}] {launcher}, \"\n- launcher_query = launcher_query[:-2] + \")? [0]: \"\n- deepspeed_config[\"deepspeed_multinode_launcher\"] = _ask_field(\n+ launcher_query = \"Which Type of launcher do you want to use?\"\n+ deepspeed_config[\"deepspeed_multinode_launcher\"] = _ask_options(\n launcher_query,\n+ DEEPSPEED_MULTINODE_LAUNCHERS,\n lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],\n- default=DEEPSPEED_MULTINODE_LAUNCHERS[0],\n )\n \n if deepspeed_config[\"deepspeed_multinode_launcher\"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n@@ -249,13 +263,11 @@ def get_cluster_input():\n if use_fsdp:\n distributed_type = DistributedType.FSDP\n if distributed_type == DistributedType.FSDP:\n- sharding_strategy_query = \"What should be your sharding strategy (\"\n- for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n- sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n- sharding_strategy_query = sharding_strategy_query[:-2] + \")? 
[1]: \"\n- fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(\n+ sharding_strategy_query = \"What should be your sharding strategy?\"\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_options(\n sharding_strategy_query,\n- lambda x: int(x),\n+ FSDP_SHARDING_STRATEGY,\n+ lambda x: int(x) + 1,\n default=1,\n )\n fsdp_config[\"fsdp_offload_params\"] = _ask_field(\n@@ -264,14 +276,11 @@ def get_cluster_input():\n default=False,\n error_message=\"Please enter yes or no.\",\n )\n- fsdp_wrap_query = \"What should be your auto wrap policy (\"\n- for i, wrap_policy in enumerate(FSDP_AUTO_WRAP_POLICY):\n- fsdp_wrap_query += f\"[{i}] {wrap_policy}, \"\n- fsdp_wrap_query = fsdp_wrap_query[:-2] + \")? [0]: \"\n- fsdp_config[\"fsdp_auto_wrap_policy\"] = _ask_field(\n+ fsdp_wrap_query = \"What should be your auto wrap policy?\"\n+ fsdp_config[\"fsdp_auto_wrap_policy\"] = _ask_options(\n fsdp_wrap_query,\n+ FSDP_AUTO_WRAP_POLICY,\n lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],\n- default=\"TRANSFORMER_BASED_WRAP\",\n )\n if fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[0]:\n fsdp_config[\"fsdp_transformer_layer_cls_to_wrap\"] = _ask_field(\n@@ -284,23 +293,18 @@ def get_cluster_input():\n lambda x: int(x),\n default=1e8,\n )\n- fsdp_backward_prefetch_query = \"What should be your FSDP's backward prefetch policy (\"\n- for i, backward_prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):\n- fsdp_backward_prefetch_query += f\"[{i}] {backward_prefetch_policy}, \"\n+ fsdp_backward_prefetch_query = \"What should be your FSDP's backward prefetch policy?\"\n fsdp_backward_prefetch_query = fsdp_backward_prefetch_query[:-2] + \")? [0]: \"\n- fsdp_config[\"fsdp_backward_prefetch_policy\"] = _ask_field(\n+ fsdp_config[\"fsdp_backward_prefetch_policy\"] = _ask_options(\n fsdp_backward_prefetch_query,\n+ FSDP_BACKWARD_PREFETCH,\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n- default=\"BACKWARD_PRE\",\n )\n- fsdp_state_dict_type_query = \"What should be your FSDP's state dict type (\"\n- for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n- fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\n- fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? [0]: \"\n- fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\n+ fsdp_state_dict_type_query = \"What should be your FSDP's state dict type?\"\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_options(\n fsdp_state_dict_type_query,\n+ FSDP_STATE_DICT_TYPE,\n lambda x: FSDP_STATE_DICT_TYPE[int(x)],\n- default=\"FULL_STATE_DICT\",\n )\n \n megatron_lm_config = {}\n@@ -460,10 +464,10 @@ def get_cluster_input():\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\n mixed_precision = \"no\"\n else:\n- mixed_precision = _ask_field(\n- \"Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: \",\n- lambda x: str(x).lower(),\n- default=\"no\",\n+ mixed_precision = _ask_options(\n+ \"Do you wish to use FP16 or BF16 (mixed precision)?\",\n+ [\"no\", \"fp16\", \"bf16\"],\n+ _convert_mixed_precision,\n )\n else:\n mixed_precision = \"no\"\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nindex 8974cddc9..def29fe06 100644\n--- a/src/accelerate/commands/config/config_utils.py\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -14,7 +14,14 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from ...utils.dataclasses import ComputeEnvironment, DistributedType, DynamoBackend, SageMakerDistributedType\n+from ...utils.dataclasses import (\n+ ComputeEnvironment,\n+ DistributedType,\n+ DynamoBackend,\n+ PrecisionType,\n+ SageMakerDistributedType,\n+)\n+from ..menu import BulletMenu\n \n \n DYNAMO_BACKENDS = [\n@@ -44,6 +51,12 @@ def _ask_field(input_text, convert_value=None, default=None, error_message=None)\n print(error_message)\n \n \n+def _ask_options(input_text, options=[], convert_value=None, default=0):\n+ menu = BulletMenu(input_text, options)\n+ result = menu.run(default_choice=default)\n+ return convert_value(result) if convert_value is not None else result\n+\n+\n def _convert_compute_environment(value):\n value = int(value)\n return ComputeEnvironment([\"LOCAL_MACHINE\", \"AMAZON_SAGEMAKER\"][value])\n@@ -56,7 +69,12 @@ def _convert_distributed_mode(value):\n \n def _convert_dynamo_backend(value):\n value = int(value)\n- return DynamoBackend(DYNAMO_BACKENDS[value + 1])\n+ return DynamoBackend(DYNAMO_BACKENDS[value])\n+\n+\n+def _convert_mixed_precision(value):\n+ value = int(value)\n+ return PrecisionType([\"no\", \"fp16\", \"bf16\"][value])\n \n \n def _convert_sagemaker_distributed_mode(value):\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex 71de4fea9..22264b41f 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -22,7 +22,9 @@\n from .config_args import SageMakerConfig\n from .config_utils import (\n _ask_field,\n+ _ask_options,\n _convert_dynamo_backend,\n+ _convert_mixed_precision,\n _convert_sagemaker_distributed_mode,\n _convert_yes_no_to_bool,\n )\n@@ -92,8 +94,9 @@ def _get_iam_role_arn(role_name):\n \n \n def get_sagemaker_input():\n- credentials_configuration = _ask_field(\n- \"How do you want to authorize? ([0] AWS Profile, [1] Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)): \",\n+ credentials_configuration = _ask_options(\n+ \"How do you want to authorize?\",\n+ [\"AWS Profile\", \"Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) \"],\n lambda x: int(x),\n )\n aws_profile = None\n@@ -114,8 +117,9 @@ def get_sagemaker_input():\n aws_region = _ask_field(\"Enter your AWS Region: [us-east-1]\", default=\"us-east-1\")\n os.environ[\"AWS_DEFAULT_REGION\"] = aws_region\n \n- role_management = _ask_field(\n- \"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs? 
([0] provide IAM Role name, [1] create new IAM role using credentials: \",\n+ role_management = _ask_options(\n+ \"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?\",\n+ [\"Provide IAM Role name\", \"Create new IAM role using credentials\"],\n lambda x: int(x),\n )\n if role_management == 0:\n@@ -161,12 +165,11 @@ def get_sagemaker_input():\n lambda x: str(x).lower(),\n )\n \n- distributed_type = _ask_field(\n- \"What is the distributed mode? ([0] No distributed training, [1] data parallelism): \",\n+ distributed_type = _ask_options(\n+ \"What is the distributed mode?\",\n+ [\"No distributed training\", \"Data parallelism\"],\n _convert_sagemaker_distributed_mode,\n- error_message=\"Please enter 0 or 1\",\n )\n-\n use_dynamo = _ask_field(\n \"Do you wish to optimize your script with torch dynamo?[yes/NO]:\",\n _convert_yes_no_to_bool,\n@@ -174,22 +177,30 @@ def get_sagemaker_input():\n error_message=\"Please enter yes or no.\",\n )\n if use_dynamo:\n- dynamo_backend = _ask_field(\n- \"Which dynamo backend would you like to use? ([0] eager, [1] aot_eager, [2] inductor, [3] nvfuser, [5] aot_nvfuser, [6] aot_cudagraphs, [7] ofi, [8] onnxrt, [9] ipex) [2]: \",\n+ dynamo_backend = _ask_options(\n+ \"Which dynamo backend would you like to use?\",\n+ [\n+ \"eager\",\n+ \"aot_eager\",\n+ \"inductor\",\n+ \"nvfuser\",\n+ \"aot_nvfuser\",\n+ \"aot_cudagraphs\",\n+ \"ofi\",\n+ \"fx2trt\",\n+ \"onnxrt\",\n+ \"ipex\",\n+ ],\n _convert_dynamo_backend,\n- default=DynamoBackend.INDUCTOR,\n- error_message=\"Please enter 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9.\",\n+ default=2,\n )\n else:\n dynamo_backend = DynamoBackend.NO\n-\n- ec2_instance_query = \"Which EC2 instance type you want to use for your training \"\n+ ec2_instance_query = \"Which EC2 instance type you want to use for your training?\"\n if distributed_type != SageMakerDistributedType.NO:\n- ec2_instance_query += \"(\"\n- for i, instance_type in enumerate(SAGEMAKER_PARALLEL_EC2_INSTANCES):\n- ec2_instance_query += f\"[{i}] {instance_type}, \"\n- ec2_instance_query = ec2_instance_query[:-2] + \")? [0]: \"\n- ec2_instance_type = _ask_field(ec2_instance_query, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])\n+ ec2_instance_type = _ask_options(\n+ ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]\n+ )\n else:\n ec2_instance_query += \"? [ml.p3.2xlarge]:\"\n ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default=\"ml.p3.2xlarge\")\n@@ -205,10 +216,10 @@ def get_sagemaker_input():\n default=1,\n )\n \n- mixed_precision = _ask_field(\n- \"Do you wish to use FP16 or BF16 (mixed precision)? [No/FP16/BF16]: \",\n- lambda x: str(x).lower(),\n- default=\"no\",\n+ mixed_precision = _ask_options(\n+ \"Do you wish to use FP16 or BF16 (mixed precision)?\",\n+ [\"no\", \"fp16\", \"bf16\"],\n+ _convert_mixed_precision,\n )\n \n if use_dynamo and mixed_precision == \"no\":\ndiff --git a/src/accelerate/commands/menu/__init__.py b/src/accelerate/commands/menu/__init__.py\nnew file mode 100644\nindex 000000000..ec17fba58\n--- /dev/null\n+++ b/src/accelerate/commands/menu/__init__.py\n@@ -0,0 +1,5 @@\n+# flake8: noqa\n+# There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n+# module, but to preserve other warnings. 
So, don't check this module at all\n+\n+from .selection_menu import BulletMenu\ndiff --git a/src/accelerate/commands/menu/cursor.py b/src/accelerate/commands/menu/cursor.py\nnew file mode 100644\nindex 000000000..c1f0bb7b6\n--- /dev/null\n+++ b/src/accelerate/commands/menu/cursor.py\n@@ -0,0 +1,65 @@\n+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet\n+\"\"\"\n+\n+import os\n+import sys\n+from contextlib import contextmanager\n+\n+\n+# Windows only\n+if os.name == \"nt\":\n+ import ctypes\n+ import msvcrt # noqa\n+\n+ class CursorInfo(ctypes.Structure):\n+ # _fields is a specific attr expected by ctypes\n+ _fields_ = [(\"size\", ctypes.c_int), (\"visible\", ctypes.c_byte)]\n+\n+\n+def hide_cursor():\n+ if os.name == \"nt\":\n+ ci = CursorInfo()\n+ handle = ctypes.windll.kernel32.GetStdHandle(-11)\n+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))\n+ ci.visible = False\n+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))\n+ elif os.name == \"posix\":\n+ sys.stdout.write(\"\\033[?25l\")\n+ sys.stdout.flush()\n+\n+\n+def show_cursor():\n+ if os.name == \"nt\":\n+ ci = CursorInfo()\n+ handle = ctypes.windll.kernel32.GetStdHandle(-11)\n+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))\n+ ci.visible = True\n+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))\n+ elif os.name == \"posix\":\n+ sys.stdout.write(\"\\033[?25h\")\n+ sys.stdout.flush()\n+\n+\n+@contextmanager\n+def hide():\n+ \"Context manager to hide the terminal cursor\"\n+ try:\n+ hide_cursor()\n+ yield\n+ finally:\n+ show_cursor()\ndiff --git a/src/accelerate/commands/menu/helpers.py b/src/accelerate/commands/menu/helpers.py\nnew file mode 100644\nindex 000000000..2cc2ece3f\n--- /dev/null\n+++ b/src/accelerate/commands/menu/helpers.py\n@@ -0,0 +1,55 @@\n+# Copyright 2022 The HuggingFace Team and Brian Chao. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A variety of helper functions and constants when dealing with terminal menu choices, based on\n+https://github.com/bchao1/bullet\n+\"\"\"\n+\n+import enum\n+import shutil\n+import sys\n+\n+\n+TERMINAL_WIDTH, _ = shutil.get_terminal_size()\n+\n+CURSOR_TO_CHAR = {\"UP\": \"A\", \"DOWN\": \"B\", \"RIGHT\": \"C\", \"LEFT\": \"D\"}\n+\n+\n+class Direction(enum.Enum):\n+ UP = 0\n+ DOWN = 1\n+\n+\n+def forceWrite(content, end=\"\"):\n+ sys.stdout.write(content + end)\n+ sys.stdout.flush()\n+\n+\n+def reset_cursor():\n+ forceWrite(\"\\r\")\n+\n+\n+def move_cursor(num_lines: int, direction: str):\n+ forceWrite(f\"\\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}\")\n+\n+\n+def clear_line():\n+ forceWrite(\" \" * TERMINAL_WIDTH)\n+ reset_cursor()\n+\n+\n+def linebreak():\n+ reset_cursor()\n+ forceWrite(\"-\" * TERMINAL_WIDTH)\ndiff --git a/src/accelerate/commands/menu/input.py b/src/accelerate/commands/menu/input.py\nnew file mode 100644\nindex 000000000..266f7e7db\n--- /dev/null\n+++ b/src/accelerate/commands/menu/input.py\n@@ -0,0 +1,86 @@\n+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+This file contains utilities for handling input from the user and registering specific keys to specific functions,\n+based on https://github.com/bchao1/bullet\n+\"\"\"\n+\n+from typing import List\n+\n+from .keymap import KEYMAP, get_character\n+\n+\n+def mark(key: str):\n+ \"\"\"\n+ Mark the function with the key code so it can be handled in the register\n+ \"\"\"\n+\n+ def decorator(func):\n+ handle = getattr(func, \"handle_key\", [])\n+ handle += [key]\n+ setattr(func, \"handle_key\", handle)\n+ return func\n+\n+ return decorator\n+\n+\n+def mark_multiple(*keys: List[str]):\n+ \"\"\"\n+ Mark the function with the key codes so it can be handled in the register\n+ \"\"\"\n+\n+ def decorator(func):\n+ handle = getattr(func, \"handle_key\", [])\n+ handle += keys\n+ setattr(func, \"handle_key\", handle)\n+ return func\n+\n+ return decorator\n+\n+\n+class KeyHandler(type):\n+ \"\"\"\n+ Metaclass that adds the key handlers to the class\n+ \"\"\"\n+\n+ def __new__(cls, name, bases, attrs):\n+ new_cls = super().__new__(cls, name, bases, attrs)\n+ if not hasattr(new_cls, \"key_handler\"):\n+ setattr(new_cls, \"key_handler\", {})\n+ setattr(new_cls, \"handle_input\", KeyHandler.handle_input)\n+\n+ for value in 
attrs.values():\n+ handled_keys = getattr(value, \"handle_key\", [])\n+ for key in handled_keys:\n+ new_cls.key_handler[key] = value\n+ return new_cls\n+\n+ @staticmethod\n+ def handle_input(cls):\n+ \"Finds and returns the selected character if it exists in the handler\"\n+ char = get_character()\n+ if char != KEYMAP[\"undefined\"]:\n+ char = ord(char)\n+ handler = cls.key_handler.get(char)\n+ if handler:\n+ cls.current_selection = char\n+ return handler(cls)\n+ else:\n+ return None\n+\n+\n+def register(cls):\n+ \"\"\"Adds KeyHandler metaclass to the class\"\"\"\n+ return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())\ndiff --git a/src/accelerate/commands/menu/keymap.py b/src/accelerate/commands/menu/keymap.py\nnew file mode 100644\nindex 000000000..7ce6c0637\n--- /dev/null\n+++ b/src/accelerate/commands/menu/keymap.py\n@@ -0,0 +1,81 @@\n+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet\n+\"\"\"\n+\n+\n+import string\n+import sys\n+import termios\n+import tty\n+\n+\n+ARROW_KEY_FLAG = 1 << 8\n+\n+KEYMAP = {\n+ \"tab\": ord(\"\\t\"),\n+ \"newline\": ord(\"\\r\"),\n+ \"esc\": 27,\n+ \"up\": 65 + ARROW_KEY_FLAG,\n+ \"down\": 66 + ARROW_KEY_FLAG,\n+ \"right\": 67 + ARROW_KEY_FLAG,\n+ \"left\": 68 + ARROW_KEY_FLAG,\n+ \"mod_int\": 91,\n+ \"undefined\": sys.maxsize,\n+ \"interrupt\": 3,\n+}\n+\n+KEYMAP[\"arrow_begin\"] = KEYMAP[\"up\"]\n+KEYMAP[\"arrow_end\"] = KEYMAP[\"left\"]\n+\n+for i in range(10):\n+ KEYMAP[str(i)] = ord(str(i))\n+\n+\n+def get_raw_chars():\n+ \"Gets raw characters from inputs\"\n+ fd = sys.stdin.fileno()\n+ old_settings = termios.tcgetattr(fd)\n+ try:\n+ tty.setraw(fd)\n+ ch = sys.stdin.read(1)\n+ finally:\n+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n+ return ch\n+\n+\n+def get_character():\n+ \"Gets a character from the keyboard and returns the key code\"\n+ char = get_raw_chars()\n+ if ord(char) in [KEYMAP[\"interrupt\"], KEYMAP[\"newline\"]]:\n+ return char\n+\n+ elif ord(char) == KEYMAP[\"esc\"]:\n+ combo = get_raw_chars()\n+ if ord(combo) == KEYMAP[\"mod_int\"]:\n+ key = get_raw_chars()\n+ if ord(key) >= KEYMAP[\"arrow_begin\"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP[\"arrow_end\"] - ARROW_KEY_FLAG:\n+ return chr(ord(key) + ARROW_KEY_FLAG)\n+ else:\n+ return KEYMAP[\"undefined\"]\n+ else:\n+ return get_raw_chars()\n+\n+ else:\n+ if char in string.printable:\n+ return char\n+ else:\n+ return KEYMAP[\"undefined\"]\ndiff --git a/src/accelerate/commands/menu/selection_menu.py b/src/accelerate/commands/menu/selection_menu.py\nnew file mode 100644\nindex 000000000..d4af34c8f\n--- /dev/null\n+++ b/src/accelerate/commands/menu/selection_menu.py\n@@ -0,0 +1,112 @@\n+# Copyright 2022 The HuggingFace Team and Brian Chao. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+Main driver for the selection menu, based on https://github.com/bchao1/bullet\n+\"\"\"\n+from . import cursor, input\n+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor\n+from .keymap import KEYMAP\n+\n+\n+@input.register\n+class BulletMenu:\n+ \"\"\"\n+ A CLI menu to select a choice from a list of choices using the keyboard.\n+ \"\"\"\n+\n+ def __init__(self, prompt: str = None, choices: list = []):\n+ self.position = 0\n+ self.choices = choices\n+ self.prompt = prompt\n+\n+ def print_choice(self, index: int):\n+ \"Prints the choice at the given index\"\n+ if index == self.position:\n+ forceWrite(f\" βž” {self.choices[index]}\")\n+ else:\n+ forceWrite(f\" {self.choices[index]}\")\n+ reset_cursor()\n+\n+ def move_direction(self, direction: Direction, num_spaces: int = 1):\n+ \"Should not be directly called, used to move a direction of either up or down\"\n+ old_position = self.position\n+ if direction == Direction.DOWN:\n+ if self.position + 1 >= len(self.choices):\n+ return\n+ self.position += num_spaces\n+ else:\n+ if self.position - 1 < 0:\n+ return\n+ self.position -= num_spaces\n+ clear_line()\n+ self.print_choice(old_position)\n+ move_cursor(num_spaces, direction.name)\n+ self.print_choice(self.position)\n+\n+ @input.mark(KEYMAP[\"up\"])\n+ def move_up(self):\n+ self.move_direction(Direction.UP)\n+\n+ @input.mark(KEYMAP[\"down\"])\n+ def move_down(self):\n+ self.move_direction(Direction.DOWN)\n+\n+ @input.mark(KEYMAP[\"newline\"])\n+ def select(self):\n+ move_cursor(len(self.choices) - self.position, \"DOWN\")\n+ return self.position\n+\n+ @input.mark(KEYMAP[\"interrupt\"])\n+ def interrupt(self):\n+ move_cursor(len(self.choices) - self.position, \"DOWN\")\n+ raise KeyboardInterrupt\n+\n+ @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])\n+ def select_row(self):\n+ index = int(chr(self.current_selection))\n+ movement = index - self.position\n+ if index == self.position:\n+ return\n+ if index < len(self.choices):\n+ if self.position > index:\n+ self.move_direction(Direction.UP, -movement)\n+ elif self.position < index:\n+ self.move_direction(Direction.DOWN, movement)\n+ else:\n+ return\n+ else:\n+ return\n+\n+ def run(self, default_choice: int = 0):\n+ \"Start the menu and return the selected choice\"\n+ if self.prompt:\n+ linebreak()\n+ forceWrite(self.prompt, \"\\n\")\n+ forceWrite(\"Please select a choice using the arrow or number keys, and selecting with enter\", \"\\n\")\n+ self.position = default_choice\n+ for i in range(len(self.choices)):\n+ self.print_choice(i)\n+ forceWrite(\"\\n\")\n+ move_cursor(len(self.choices) - self.position, \"UP\")\n+ with cursor.hide():\n+ while True:\n+ choice = self.handle_input()\n+ if choice is not None:\n+ reset_cursor()\n+ for _ in range(len(self.choices) + 1):\n+ move_cursor(1, \"UP\")\n+ clear_line()\n+ forceWrite(f\" * {self.choices[choice]}\", \"\\n\")\n+ return choice\n", 
"code_comments": [ { "body": "Let's name this submodule `menu` since it's about that?", "diff_hunk": "@@ -0,0 +1,5 @@\n+# flake8: noqa", "from_author": false }, { "body": "You know how I feel about _ prefixing names.", "diff_hunk": "@@ -0,0 +1,64 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet\n+\"\"\"\n+\n+import os\n+import sys\n+from contextlib import contextmanager\n+\n+\n+# Windows only\n+if os.name == \"nt\":\n+ import ctypes\n+ import msvcrt # noqa\n+\n+ class _CursorInfo(ctypes.Structure):\n+ _fields_ = [(\"size\", ctypes.c_int), (\"visible\", ctypes.c_byte)]", "from_author": false }, { "body": "If this is heavily inspired by another lib, it needs to be reflected in the copyright probably.", "diff_hunk": "@@ -0,0 +1,64 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.", "from_author": false }, { "body": "`_fields_` is very specific to `ctypes.Structure`, but will rename `CursorInfo`", "diff_hunk": "@@ -0,0 +1,64 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet\n+\"\"\"\n+\n+import os\n+import sys\n+from contextlib import contextmanager\n+\n+\n+# Windows only\n+if os.name == \"nt\":\n+ import ctypes\n+ import msvcrt # noqa\n+\n+ class _CursorInfo(ctypes.Structure):\n+ _fields_ = [(\"size\", ctypes.c_int), (\"visible\", ctypes.c_byte)]", "from_author": true }, { "body": "This and every function in this module pretty please!", "diff_hunk": "@@ -0,0 +1,64 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet\n+\"\"\"\n+\n+import os\n+import sys\n+from contextlib import contextmanager\n+\n+\n+# Windows only\n+if os.name == \"nt\":\n+ import ctypes\n+ import msvcrt # noqa\n+\n+ class _CursorInfo(ctypes.Structure):\n+ _fields_ = [(\"size\", ctypes.c_int), (\"visible\", ctypes.c_byte)]", "from_author": false }, { "body": "```suggestion\r\n lambda x: int(x)+1,\r\n```", "diff_hunk": "@@ -227,12 +223,10 @@ def get_cluster_input():\n if use_fsdp:\n distributed_type = DistributedType.FSDP\n if distributed_type == DistributedType.FSDP:\n- sharding_strategy_query = \"What should be your sharding strategy (\"\n- for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n- sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n- sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\n- fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(\n+ sharding_strategy_query = \"What should be your sharding strategy?\"\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_options(\n sharding_strategy_query,\n+ FSDP_SHARDING_STRATEGY,\n lambda x: int(x),", "from_author": false }, { "body": "Because it's 1 based numbering", "diff_hunk": "@@ -227,12 +223,10 @@ def get_cluster_input():\n if use_fsdp:\n distributed_type = DistributedType.FSDP\n if distributed_type == DistributedType.FSDP:\n- sharding_strategy_query = \"What should be your sharding strategy (\"\n- for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n- sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n- sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\n- fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(\n+ sharding_strategy_query = \"What should be your sharding strategy?\"\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_options(\n sharding_strategy_query,\n+ FSDP_SHARDING_STRATEGY,\n lambda x: int(x),", "from_author": false }, { "body": "Missing the conversion function here.", "diff_hunk": "@@ -460,10 +463,9 @@ def get_cluster_input():\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\n mixed_precision = \"no\"\n else:\n- mixed_precision = _ask_field(\n- \"Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: \",\n- lambda x: str(x).lower(),\n- default=\"no\",\n+ mixed_precision = _ask_options(\n+ \"Do you wish to use FP16 or BF16 (mixed precision)?\",\n+ [\"no\", \"fp16\", \"bf16\"],", "from_author": false }, { "body": "Thanks! Fixed here and for sagemaker", "diff_hunk": "@@ -460,10 +463,9 @@ def get_cluster_input():\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\n mixed_precision = \"no\"\n else:\n- mixed_precision = _ask_field(\n- \"Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: \",\n- lambda x: str(x).lower(),\n- default=\"no\",\n+ mixed_precision = _ask_options(\n+ \"Do you wish to use FP16 or BF16 (mixed precision)?\",\n+ [\"no\", \"fp16\", \"bf16\"],", "from_author": true }, { "body": "This line (297) should have been removed", "diff_hunk": "@@ -284,23 +293,18 @@ def get_cluster_input():\n lambda x: int(x),\n default=1e8,\n )\n- fsdp_backward_prefetch_query = \"What should be your FSDP's backward prefetch policy (\"\n- for i, backward_prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):\n- fsdp_backward_prefetch_query += f\"[{i}] {backward_prefetch_policy}, \"\n+ fsdp_backward_prefetch_query = \"What should be your FSDP's backward prefetch policy?\"\n fsdp_backward_prefetch_query = fsdp_backward_prefetch_query[:-2] + \")? [0]: \"", "from_author": false } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_830). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_830). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_830). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_830). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_830). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_830). All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/830", "pr_id": 1115322323 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 63ea4a71b..ad22c8ee4 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -36,6 +36,7 @@\n DeepSpeedPlugin,\n DistributedDataParallelKwargs,\n DistributedType,\n+ DynamoBackend,\n FullyShardedDataParallelPlugin,\n GradScalerKwargs,\n InitProcessGroupKwargs,\n@@ -163,6 +164,8 @@ class Accelerator:\n kwargs_handlers (`List[KwargHandler]`, *optional*)\n A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\n are created. 
See [kwargs](kwargs) for more information.\n+ dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `\"no\"`):\n+ Set to one of the possible dynamo backends to optimize your training with torch dynamo.\n \n **Available attributes:**\n \n@@ -198,6 +201,7 @@ def __init__(\n even_batches: bool = True,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n+ dynamo_backend: Union[DynamoBackend, str] = None,\n ):\n self.logging_dir = logging_dir\n trackers = filter_trackers(log_with, self.logging_dir)\n@@ -219,6 +223,9 @@ def __init__(\n )\n mixed_precision = \"fp16\"\n \n+ if dynamo_backend is not None:\n+ dynamo_backend = DynamoBackend(dynamo_backend.upper())\n+\n if deepspeed_plugin is None: # init from env variables\n deepspeed_plugin = DeepSpeedPlugin() if os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" else None\n else:\n@@ -285,6 +292,7 @@ def __init__(\n self.state = AcceleratorState(\n mixed_precision=mixed_precision,\n cpu=cpu,\n+ dynamo_backend=dynamo_backend,\n deepspeed_plugin=deepspeed_plugin,\n fsdp_plugin=fsdp_plugin,\n megatron_lm_plugin=megatron_lm_plugin,\n@@ -793,6 +801,10 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n self._models.append(model)\n if device_placement:\n model = model.to(self.device)\n+ if self.state.dynamo_backend != DynamoBackend.NO:\n+ import torch._dynamo as dynamo\n+\n+ model = dynamo.optimize(self.state.dynamo_backend.value.lower())(model)\n if self.distributed_type == DistributedType.MULTI_GPU:\n if any(p.requires_grad for p in model.parameters()):\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex aa61a6857..5f032debc 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -16,7 +16,13 @@\n \n import os\n \n-from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_transformers_available\n+from ...utils import (\n+ ComputeEnvironment,\n+ DistributedType,\n+ DynamoBackend,\n+ is_deepspeed_available,\n+ is_transformers_available,\n+)\n from ...utils.constants import (\n DEEPSPEED_MULTINODE_LAUNCHERS,\n FSDP_AUTO_WRAP_POLICY,\n@@ -25,7 +31,7 @@\n FSDP_STATE_DICT_TYPE,\n )\n from .config_args import ClusterConfig\n-from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n+from .config_utils import _ask_field, _convert_distributed_mode, _convert_dynamo_backend, _convert_yes_no_to_bool\n \n \n def get_cluster_input():\n@@ -89,6 +95,22 @@ def get_cluster_input():\n else:\n use_cpu = False\n \n+ use_dynamo = _ask_field(\n+ \"Do you wish to optimize your script with torch dynamo?[yes/NO]:\",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_dynamo:\n+ dynamo_backend = _ask_field(\n+ \"Which dynamo backend would you like to use? 
([0] eager, [1] aot_eager, [2] inductor, [3] nvfuser, [5] aot_nvfuser, [6] aot_cudagraphs, [7] ofi, [8] onnxrt, [9] ipex) [2]: \",\n+ _convert_dynamo_backend,\n+ default=DynamoBackend.INDUCTOR,\n+ error_message=\"Please enter 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9.\",\n+ )\n+ else:\n+ dynamo_backend = DynamoBackend.NO\n+\n deepspeed_config = {}\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n use_deepspeed = _ask_field(\n@@ -446,6 +468,11 @@ def get_cluster_input():\n else:\n mixed_precision = \"no\"\n \n+ if use_dynamo and mixed_precision == \"no\" and not use_cpu:\n+ print(\n+ \"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.\"\n+ )\n+\n downcast_bf16 = \"no\"\n if distributed_type == DistributedType.TPU and mixed_precision == \"bf16\":\n downcast_bf16 = _ask_field(\n@@ -474,4 +501,5 @@ def get_cluster_input():\n tpu_zone=tpu_zone,\n commands=commands,\n command_file=command_file,\n+ dynamo_backend=dynamo_backend,\n )\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex a5a841900..ba492802e 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -22,7 +22,7 @@\n \n import yaml\n \n-from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+from ...utils import ComputeEnvironment, DistributedType, DynamoBackend, SageMakerDistributedType\n from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION\n \n \n@@ -70,6 +70,7 @@ class BaseConfig:\n distributed_type: Union[DistributedType, SageMakerDistributedType]\n mixed_precision: str\n use_cpu: bool\n+ dynamo_backend: DynamoBackend\n \n def to_dict(self):\n result = self.__dict__\n@@ -92,6 +93,8 @@ def from_json_file(cls, json_file=None):\n del config_dict[\"fp16\"]\n if \"use_cpu\" not in config_dict:\n config_dict[\"use_cpu\"] = False\n+ if \"dynamo_backend\" not in config_dict:\n+ config_dict[\"dynamo_backend\"] = DynamoBackend.NO\n return cls(**config_dict)\n \n def to_json_file(self, json_file):\n@@ -113,6 +116,8 @@ def from_yaml_file(cls, yaml_file=None):\n del config_dict[\"fp16\"]\n if \"use_cpu\" not in config_dict:\n config_dict[\"use_cpu\"] = False\n+ if \"dynamo_backend\" not in config_dict:\n+ config_dict[\"dynamo_backend\"] = DynamoBackend.NO\n \n return cls(**config_dict)\n \n@@ -128,6 +133,8 @@ def __post_init__(self):\n self.distributed_type = SageMakerDistributedType(self.distributed_type)\n else:\n self.distributed_type = DistributedType(self.distributed_type)\n+ if isinstance(self.dynamo_backend, str):\n+ self.dynamo_backend = DynamoBackend(self.dynamo_backend.upper())\n \n \n @dataclass\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nindex 9dd1f4c99..8974cddc9 100644\n--- a/src/accelerate/commands/config/config_utils.py\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -14,7 +14,21 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from ...utils.dataclasses import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+from ...utils.dataclasses import ComputeEnvironment, DistributedType, DynamoBackend, SageMakerDistributedType\n+\n+\n+DYNAMO_BACKENDS = [\n+ \"EAGER\",\n+ \"AOT_EAGER\",\n+ \"INDUCTOR\",\n+ \"NVFUSER\",\n+ \"AOT_NVFUSER\",\n+ \"AOT_CUDAGRAPHS\",\n+ 
\"OFI\",\n+ \"FX2TRT\",\n+ \"ONNXRT\",\n+ \"IPEX\",\n+]\n \n \n def _ask_field(input_text, convert_value=None, default=None, error_message=None):\n@@ -40,6 +54,11 @@ def _convert_distributed_mode(value):\n return DistributedType([\"NO\", \"MULTI_CPU\", \"MULTI_GPU\", \"TPU\", \"MPS\"][value])\n \n \n+def _convert_dynamo_backend(value):\n+ value = int(value)\n+ return DynamoBackend(DYNAMO_BACKENDS[value + 1])\n+\n+\n def _convert_sagemaker_distributed_mode(value):\n value = int(value)\n return SageMakerDistributedType([\"NO\", \"DATA_PARALLEL\", \"MODEL_PARALLEL\"][value])\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex b3a45c9e4..71de4fea9 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -17,10 +17,15 @@\n import os\n \n from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES\n-from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType\n+from ...utils.dataclasses import ComputeEnvironment, DynamoBackend, SageMakerDistributedType\n from ...utils.imports import is_boto3_available\n from .config_args import SageMakerConfig\n-from .config_utils import _ask_field, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool\n+from .config_utils import (\n+ _ask_field,\n+ _convert_dynamo_backend,\n+ _convert_sagemaker_distributed_mode,\n+ _convert_yes_no_to_bool,\n+)\n \n \n if is_boto3_available():\n@@ -162,6 +167,22 @@ def get_sagemaker_input():\n error_message=\"Please enter 0 or 1\",\n )\n \n+ use_dynamo = _ask_field(\n+ \"Do you wish to optimize your script with torch dynamo?[yes/NO]:\",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_dynamo:\n+ dynamo_backend = _ask_field(\n+ \"Which dynamo backend would you like to use? ([0] eager, [1] aot_eager, [2] inductor, [3] nvfuser, [5] aot_nvfuser, [6] aot_cudagraphs, [7] ofi, [8] onnxrt, [9] ipex) [2]: \",\n+ _convert_dynamo_backend,\n+ default=DynamoBackend.INDUCTOR,\n+ error_message=\"Please enter 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9.\",\n+ )\n+ else:\n+ dynamo_backend = DynamoBackend.NO\n+\n ec2_instance_query = \"Which EC2 instance type you want to use for your training \"\n if distributed_type != SageMakerDistributedType.NO:\n ec2_instance_query += \"(\"\n@@ -186,15 +207,21 @@ def get_sagemaker_input():\n \n mixed_precision = _ask_field(\n \"Do you wish to use FP16 or BF16 (mixed precision)? [No/FP16/BF16]: \",\n- lambda x: str(x),\n- default=\"No\",\n+ lambda x: str(x).lower(),\n+ default=\"no\",\n )\n \n+ if use_dynamo and mixed_precision == \"no\":\n+ print(\n+ \"Torch dynamo used without mixed precision requires TF32 to be efficient. 
Accelerate will enable it by default when launching your scripts.\"\n+ )\n+\n return SageMakerConfig(\n image_uri=docker_image,\n compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,\n distributed_type=distributed_type,\n use_cpu=False,\n+ dynamo_backend=dynamo_backend,\n ec2_instance_type=ec2_instance_type,\n profile=aws_profile,\n region=aws_region,\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 46d95c713..d96ab2f45 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -30,10 +30,12 @@\n import psutil\n from accelerate.commands.config import default_config_file, load_config_from_file\n from accelerate.commands.config.config_args import SageMakerConfig\n+from accelerate.commands.config.config_utils import DYNAMO_BACKENDS\n from accelerate.state import get_int_from_env\n from accelerate.utils import (\n ComputeEnvironment,\n DistributedType,\n+ DynamoBackend,\n PrecisionType,\n PrepareForLaunch,\n _filter_args,\n@@ -171,6 +173,13 @@ def launch_command_parser(subparsers=None):\n resource_args = parser.add_argument_group(\n \"Resource Selection Arguments\", \"Arguments for fine-tuning how available hardware should be used.\"\n )\n+ resource_args.add_argument(\n+ \"--dynamo_backend\",\n+ type=str,\n+ choices=[\"no\"] + [b.lower() for b in DYNAMO_BACKENDS],\n+ help=\"Choose a backend to optimize your training with dynamo, see more at \"\n+ \"https://github.com/pytorch/torchdynamo.\",\n+ )\n resource_args.add_argument(\n \"--mixed_precision\",\n type=str,\n@@ -546,6 +555,13 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+\n+ try:\n+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\n+ except ValueError:\n+ raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.\")\n+ current_env[\"DYNAMO_BACKEND\"] = dynamo_backend.value\n+\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n \n process = subprocess.Popen(cmd, env=current_env)\n@@ -598,6 +614,13 @@ def multi_gpu_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+\n+ try:\n+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\n+ except ValueError:\n+ raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.\")\n+ current_env[\"DYNAMO_BACKEND\"] = dynamo_backend.value\n+\n if args.use_fsdp:\n current_env[\"USE_FSDP\"] = \"true\"\n current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.fsdp_sharding_strategy)\n@@ -893,10 +916,16 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', FutureWarning)\n mixed_precision = \"fp16\"\n \n+ try:\n+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())\n+ except ValueError:\n+ raise ValueError(f\"Unknown dynamo backend: {args.dynamo_backend.upper()}. 
Choose between {DYNAMO_BACKENDS}.\")\n+\n # Environment variables to be set for use during training job\n environment = {\n \"USE_SAGEMAKER\": \"true\",\n \"MIXED_PRECISION\": str(mixed_precision),\n+ \"DYNAMO_BACKEND\": dynamo_backend.value,\n \"SAGEMAKER_DISTRIBUTED_TYPE\": sagemaker_config.distributed_type.value,\n }\n # configure distribution set up\n@@ -1017,6 +1046,9 @@ def launch_command(args):\n args.mixed_precision = \"fp16\"\n else:\n args.mixed_precision = defaults.mixed_precision\n+ if args.dynamo_backend is None:\n+ warned.append(\"\\t`--dynamo_backend` was set to a value of `'no'`\")\n+ args.dynamo_backend = \"no\"\n else:\n if args.num_processes is None:\n args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1\n@@ -1029,6 +1061,9 @@ def launch_command(args):\n args.mixed_precision = \"no\"\n if not hasattr(args, \"use_cpu\"):\n args.use_cpu = args.cpu\n+ if args.dynamo_backend is None:\n+ warned.append(\"\\t`--dynamo_backend` was set to a value of `'no'`\")\n+ args.dynamo_backend = \"no\"\n \n if args.num_cpu_threads_per_process is None:\n args.num_cpu_threads_per_process = 1\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 00ba3ab67..18df61bbc 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -19,6 +19,7 @@\n \n from .utils import (\n DistributedType,\n+ DynamoBackend,\n get_ccl_version,\n get_int_from_env,\n is_ccl_available,\n@@ -57,6 +58,7 @@ def __init__(\n self,\n mixed_precision: str = None,\n cpu: bool = False,\n+ dynamo_backend=None,\n deepspeed_plugin=None,\n fsdp_plugin=None,\n megatron_lm_plugin=None,\n@@ -74,6 +76,10 @@ def __init__(\n mixed_precision = (\n parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision.lower()\n )\n+ dynamo_backend = (\n+ parse_choice_from_env(\"DYNAMO_BACKEND\", \"no\") if dynamo_backend is None else dynamo_backend\n+ )\n+ self.dynamo_backend = DynamoBackend(dynamo_backend.upper())\n if not _from_accelerator:\n raise ValueError(\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\n@@ -230,6 +236,9 @@ def __init__(\n else:\n self.device = torch.device(\"cuda\")\n self.mixed_precision = mixed_precision\n+\n+ if self.dynamo_backend != DynamoBackend.NO and self.mixed_precision == \"no\" and self.device.type == \"cuda\":\n+ torch.backends.cuda.matmul.allow_tf32 = True\n self.initialized = True\n \n def __repr__(self):\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex a017a2ad5..3ad09ecab 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -8,6 +8,7 @@\n DeepSpeedPlugin,\n DistributedDataParallelKwargs,\n DistributedType,\n+ DynamoBackend,\n FullyShardedDataParallelPlugin,\n GradScalerKwargs,\n InitProcessGroupKwargs,\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 7dd8798e9..e51576865 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -164,6 +164,50 @@ class ComputeEnvironment(str, enum.Enum):\n AMAZON_SAGEMAKER = \"AMAZON_SAGEMAKER\"\n \n \n+class DynamoBackend(str, enum.Enum):\n+ \"\"\"\n+ Represents a dynamo backend (see https://github.com/pytorch/torchdynamo).\n+\n+ Values:\n+\n+ - **NO** -- Do not use torch dynamo.\n+ - **EAGER** -- Uses PyTorch to run the extracted GraphModule. 
This is quite useful in debugging TorchDynamo\n+ issues.\n+ - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's\n+ extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.\n+ - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton\n+ kernels. [Read\n+ more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)\n+ - **NVFUSER** -- nvFuser with TorchScript. [Read\n+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)\n+ - **AOT_NVFUSER** -- nvFuser with AotAutograd. [Read\n+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)\n+ - **AOT_CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read\n+ more](https://github.com/pytorch/torchdynamo/pull/757)\n+ - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read\n+ more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)\n+ - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read\n+ more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)\n+ - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)\n+ - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read\n+ more](https://github.com/intel/intel-extension-for-pytorch).\n+\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.\n+ NO = \"NO\"\n+ EAGER = \"EAGER\"\n+ AOT_EAGER = \"AOT_EAGER\"\n+ INDUCTOR = \"INDUCTOR\"\n+ NVFUSER = \"NVFUSER\"\n+ AOT_NVFUSER = \"AOT_NVFUSER\"\n+ AOT_CUDAGRAPHS = \"AOT_CUDAGRAPHS\"\n+ OFI = \"OFI\"\n+ FX2TRT = \"FX2TRT\"\n+ ONNXRT = \"ONNXRT\"\n+ IPEX = \"IPEX\"\n+\n+\n class EnumWithContains(enum.EnumMeta):\n \"A metaclass that adds the ability to check if `self` contains an item with the `in` operator\"\n \ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\nindex ff360038d..f1af373b6 100644\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -115,7 +115,7 @@ def get_pretty_name(obj):\n return str(obj)\n \n \n-def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file):\n+def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file, dynamo_backend=\"no\"):\n \"\"\"\n Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\n set CPU if it is a CPU-only machine.\n@@ -138,7 +138,11 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\n mixed_precision = mixed_precision.lower()\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n raise ValueError(f\"`mixed_precision` should be one of 'no', 'fp16', or 'bf16'. 
Received {mixed_precision}\")\n- config = {\"compute_environment\": \"LOCAL_MACHINE\", \"mixed_precision\": mixed_precision}\n+ config = {\n+ \"compute_environment\": \"LOCAL_MACHINE\",\n+ \"mixed_precision\": mixed_precision,\n+ \"dynamo_backend\": dynamo_backend,\n+ }\n if torch.cuda.is_available():\n num_gpus = torch.cuda.device_count()\n config[\"num_processes\"] = num_gpus\n", "code_comments": [ { "body": "```suggestion\n Set to one of the possible dynamo backends to optimize your training with torch dynamo.\n```\n", "diff_hunk": "@@ -163,6 +164,8 @@ class Accelerator:\n kwargs_handlers (`List[KwargHandler]`, *optional*)\n A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\n are created. See [kwargs](kwargs) for more information.\n+ dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `\"no\"`):\n+ Set to one of the possible dynamo backends to optimizer your training with torch dynamo.", "from_author": false }, { "body": "```suggestion\n if self.dynamo_backend != DynamoBackend.NO and self.mixed_precision == \"no\" and self.device.type == \"cuda\":\n```\n", "diff_hunk": "@@ -230,6 +236,9 @@ def __init__(\n else:\n self.device = torch.device(\"cuda\")\n self.mixed_precision = mixed_precision\n+\n+ if self.dynamo_backend != DynamoBackend.NO and self.mixed_precision != \"no\" and self.device.type == \"cuda\":", "from_author": false } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_829). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_829). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_829). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_829). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/829", "pr_id": 1114981317 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 46d95c713..9238fbca9 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -56,9 +56,6 @@\n logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\n \n \n-if is_torch_version(\">=\", \"1.9.0\"):\n- import torch.distributed.run as distrib_run\n-\n logger = logging.getLogger(__name__)\n \n options_to_group = {\n@@ -555,6 +552,8 @@ def simple_launcher(args):\n \n \n def multi_gpu_launcher(args):\n+ if is_torch_version(\">=\", \"1.9.0\"):\n+ import torch.distributed.run as distrib_run\n num_processes = getattr(args, \"num_processes\")\n num_machines = getattr(args, \"num_machines\")\n main_process_ip = getattr(args, \"main_process_ip\")\n@@ -644,6 +643,8 @@ def multi_gpu_launcher(args):\n \n \n def deepspeed_launcher(args):\n+ if is_torch_version(\">=\", \"1.9.0\"):\n+ import torch.distributed.run as distrib_run\n if not is_deepspeed_available():\n raise ImportError(\"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\")\n num_processes = getattr(args, \"num_processes\")\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nindex 8642a441a..c65df08dc 100644\n--- a/src/accelerate/utils/launch.py\n+++ b/src/accelerate/utils/launch.py\n@@ -21,10 +21,6 @@\n from .dataclasses import DistributedType\n \n \n-if is_torch_version(\">=\", \"1.9.0\"):\n- import torch.distributed.run as distrib_run\n-\n-\n def get_launch_prefix():\n \"\"\"\n Grabs the correct launcher for starting a distributed command, such as either `torchrun`, `python -m\n@@ -43,6 +39,8 @@ def _filter_args(args):\n \"\"\"\n Filters out all `accelerate` specific args\n \"\"\"\n+ if is_torch_version(\">=\", \"1.9.0\"):\n+ import torch.distributed.run as distrib_run\n distrib_args = distrib_run.get_args_parser()\n new_args, _ = distrib_args.parse_known_args()\n \n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_828). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/828", "pr_id": 1114610182 }, { "diff": "diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml\nindex dc56751c6..12c600cb7 100644\n--- a/.github/workflows/build_pr_documentation.yml\n+++ b/.github/workflows/build_pr_documentation.yml\n@@ -9,8 +9,11 @@ concurrency:\n \n jobs:\n build:\n- uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main\n+ uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@use_hf_hub\n with:\n commit_sha: ${{ github.event.pull_request.head.sha }}\n pr_number: ${{ github.event.number }}\n package: accelerate\n+ secrets:\n+ token: ${{ secrets.HF_DOC_PUSH }}\n+ comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}\ndiff --git a/.github/workflows/delete_doc_comment.yml b/.github/workflows/delete_doc_comment.yml\nindex da61d21df..973c2a8b3 100644\n--- a/.github/workflows/delete_doc_comment.yml\n+++ b/.github/workflows/delete_doc_comment.yml\n@@ -7,7 +7,10 @@ on:\n \n jobs:\n delete:\n- uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main\n+ uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@use_hf_hub\n with:\n pr_number: ${{ github.event.number }}\n package: accelerate\n+ secrets:\n+ token: ${{ secrets.HF_DOC_PUSH }}\n+ comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}\n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_827). All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/827", "pr_id": 1114535307 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 22d7cfb8d..b4708353c 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\", \"hf-doc-builder >= 0.3.0\"]\n extras[\"docs\"] = []\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\n-extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed<0.7.0\", \"tqdm\"]\n+extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"scikit-learn\", \"deepspeed<0.7.0\", \"tqdm\"]\n extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\n extras[\"rich\"] = [\"rich\"]\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/825", "pr_id": 1113209635 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex acc4a1828..aa61a6857 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -385,11 +385,23 @@ def get_cluster_input():\n )\n command_file = os.path.abspath(command_file)\n else:\n- commands = _ask_field(\n- \"What commands do you wish to run on startup in each pod? 
\",\n- default=None,\n- error_message=\"Please enter the commands you wish to run on startup in each pod as a single string.\",\n- )\n+ print(\"Please enter each command seperately you wish to run on startup in each pod.\")\n+ commands = []\n+ another_command = True\n+ while another_command:\n+ commands.append(\n+ _ask_field(\n+ \"Please enter a single command to be ran \",\n+ default=None,\n+ error_message=\"Please enter the commands you wish to run on startup in each pod as a single string.\",\n+ )\n+ )\n+ another_command = _ask_field(\n+ \"Do you wish to add another command? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n \n else:\n main_training_function = \"main\"\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 9a1247c55..a5a841900 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -155,7 +155,7 @@ class ClusterConfig(BaseConfig):\n tpu_name: str = None\n tpu_zone: str = None\n command_file: str = None\n- command: List[str] = None\n+ commands: List[str] = None\n \n def __post_init__(self):\n if self.deepspeed_config is None:\ndiff --git a/src/accelerate/commands/tpu.py b/src/accelerate/commands/tpu.py\nindex 59bbb08e9..6b90770c7 100644\n--- a/src/accelerate/commands/tpu.py\n+++ b/src/accelerate/commands/tpu.py\n@@ -89,8 +89,8 @@ def tpu_command_launcher(args):\n defaults = load_config_from_file(args.config_file)\n if not args.command_file and defaults.command_file is not None and not args.command:\n args.command_file = defaults.command_file\n- if not args.command and defaults.command is not None:\n- args.command = defaults.command\n+ if not args.command and defaults.commands is not None:\n+ args.command = defaults.commands\n if not args.tpu_name:\n args.tpu_name = defaults.tpu_name\n if not args.tpu_zone:\n@@ -110,7 +110,8 @@ def tpu_command_launcher(args):\n args.command = [f.read().splitlines()]\n \n # To turn list of lists into list of strings\n- args.command = [line for cmd in args.command for line in cmd]\n+ if isinstance(args.command[0], list):\n+ args.command = [line for cmd in args.command for line in cmd]\n # Default to the shared folder and install accelerate\n new_cmd = [\"cd /usr/share\"]\n if args.install_accelerate:\ndiff --git a/tests/test_configs/latest.yaml b/tests/test_configs/latest.yaml\nindex de6be03a4..456348ef9 100644\n--- a/tests/test_configs/latest.yaml\n+++ b/tests/test_configs/latest.yaml\n@@ -17,5 +17,5 @@ same_network: true\n use_cpu: false\n tpu_name: 'test-tpu'\n tpu_zone: 'us-central1-a'\n-command: null\n+commands: null\n command_file: tests/test_samples/test_command_file.sh\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@sgugger fails are unrelated, see https://github.com/huggingface/accelerate/pull/825", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/824", "pr_id": 1113202499 }, { "diff": "diff --git a/docs/source/usage_guides/tracking.mdx b/docs/source/usage_guides/tracking.mdx\nindex cc5c17418..b22a4dd14 100644\n--- a/docs/source/usage_guides/tracking.mdx\n+++ b/docs/source/usage_guides/tracking.mdx\n@@ -17,11 +17,12 @@ There are a large number of experiment tracking API's available, however getting\n \n ## Integrated Trackers\n \n-Currently `Accelerate` supports three 
trackers out-of-the-box:\n+Currently `Accelerate` supports four trackers out-of-the-box:\n \n - TensorBoard\n - WandB\n - CometML\n+- MLFlow\n \n To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerate`]:\n ```python\ndiff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\nindex 444784b49..176d97d2b 100644\n--- a/docs/source/usage_guides/training_zoo.mdx\n+++ b/docs/source/usage_guides/training_zoo.mdx\n@@ -21,7 +21,10 @@ Below contains a non-exhuastive list of tutorials and scripts showcasing Acceler\n These examples showcase the base features of Accelerate and are a great starting point\n \n - [Barebones NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py)\n+- [Barebones distributed NLP example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)\n - [Barebones computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/cv_example.py)\n+- [Barebones distributed computer vision example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)\n+- [Using Accelerate in Kaggle](https://www.kaggle.com/code/muellerzr/multi-gpu-and-accelerate)\n \n ### Feature Specific Examples\n \n@@ -101,6 +104,10 @@ These are tutorials from libraries that integrate with πŸ€— Accelerate:\n \n - [Perform Deep Learning with 3D data](https://pytorch3d.org/tutorials/)\n \n+### Stable-Dreamfusion\n+\n+- [Training with Stable-Dreamfusion to convert text to a 3D model](https://colab.research.google.com/drive/1MXT3yfOFvO0ooKEfiUUvTKwUkrrlCHpF?usp=sharing)\n+\n ### Tez \n \n - [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/823", "pr_id": 1112992023 }, { "diff": "diff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\nindex 36ace84b9..0fdff58ae 100755\n--- a/examples/by_feature/deepspeed_with_config_support.py\n+++ b/examples/by_feature/deepspeed_with_config_support.py\n@@ -285,10 +285,9 @@ def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):\n outputs = model(**batch)\n \n loss = outputs.loss\n- losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\n \n losses = torch.cat(losses)\n- losses = losses[: len(eval_dataset)]\n try:\n eval_loss = torch.mean(losses)\n perplexity = math.exp(eval_loss)\n", "code_comments": [ { "body": "This line (291) becomes redundant when using `gather_for_metrics`", "diff_hunk": "@@ -285,7 +285,7 @@ def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):\n outputs = model(**batch)\n \n loss = outputs.loss\n- losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\n \n losses = torch.cat(losses)\n losses = losses[: len(eval_dataset)]", "from_author": false }, { "body": "Ah yeah, makes sense thank you!", "diff_hunk": "@@ -285,7 +285,7 @@ def evaluate(args, model, 
eval_dataloader, accelerator, eval_dataset):\n outputs = model(**batch)\n \n loss = outputs.loss\n- losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\n \n losses = torch.cat(losses)\n losses = losses[: len(eval_dataset)]", "from_author": true } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_821). All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/821", "pr_id": 1112054741 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 63ea4a71b..e2248fcfa 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -25,7 +25,7 @@\n import torch\n \n from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state\n-from .data_loader import prepare_data_loader\n+from .data_loader import DataLoaderDispatcher, prepare_data_loader\n from .logging import get_logger\n from .optimizer import AcceleratedOptimizer\n from .scheduler import AcceleratedScheduler\n@@ -87,6 +87,9 @@\n megatron_lm_prepare_scheduler,\n )\n \n+if is_torch_version(\">\", \"1.10.0\"):\n+ from torch.distributed.algorithms.join import Join\n+\n \n if is_tpu_available(check_device=False):\n import torch_xla.distributed.xla_multiprocessing as xmp\n@@ -353,6 +356,7 @@ def __init__(\n self._optimizers = []\n self._models = []\n self._schedulers = []\n+ self._dataloaders = []\n self._custom_objects = []\n \n # RNG Types\n@@ -608,6 +612,93 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\n+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the\n+ length of the dataset.\n+\n+ Args:\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\n+ even_batches (`bool`, *optional*)\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\n+ the default `Accelerator` value wil be used.\n+\n+ <Tip warning={true}>\n+\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\n+ configuration, this method will have no effect.\n+\n+ </Tip>\n+\n+ <Tip warning={true}>\n+\n+ Overidding `even_batches` will not affect iterable-style data loaders.\n+\n+ </Tip>\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator(even_batches=True)\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\n+\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\n+ ... for input, output in dataloader:\n+ ... outputs = model(input)\n+ ... loss = loss_func(outputs)\n+ ... loss.backward()\n+ ... optimizer.step()\n+ ... 
optimizer.zero_grad()\n+ ```\n+\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ iterable_dl_seen = False\n+ # override value in batch sampler for map-style datasets\n+ for dl_idx, dl in enumerate(self._dataloaders):\n+ if isinstance(dl, DataLoaderDispatcher):\n+ iterable_dl_seen = True\n+ continue\n+ dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))\n+ dl.batch_sampler.even_batches = even_batches\n+\n+ if iterable_dl_seen:\n+ warnings.warn(\n+ \"Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable\"\n+ )\n+ else:\n+ even_batches = self.even_batches\n+\n+ enable_join = False if even_batches else True\n+ try:\n+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):\n+ yield\n+ finally:\n+ # reset any batch samplers that have been modified\n+ for dl_idx, even_batches_value in dl_even_batches_values:\n+ self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value\n+ else:\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\n+ if self.distributed_type != DistributedType.NO:\n+ warnings.warn(\n+ \"Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect.\"\n+ )\n+\n+ with contextlib.nullcontext(joinables):\n+ yield\n+\n def print(self, *args, **kwargs):\n \"\"\"\n Use in replacement of `print()` to only print once per server.\n@@ -1117,7 +1208,7 @@ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_p\n \"\"\"\n if device_placement is None:\n device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False\n- return prepare_data_loader(\n+ prepared_data_loader = prepare_data_loader(\n data_loader,\n self.device,\n num_processes=self.num_processes,\n@@ -1128,6 +1219,8 @@ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_p\n dispatch_batches=self.dispatch_batches,\n even_batches=self.even_batches,\n )\n+ self._dataloaders.append(prepared_data_loader)\n+ return prepared_data_loader\n \n def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):\n \"\"\"\n@@ -1611,6 +1704,7 @@ def free_memory(self):\n self._schedulers = []\n self._optimizers = []\n self._models = []\n+ self._dataloaders = []\n self.deepspeed_engine_wrapped = None\n gc.collect()\n torch.cuda.empty_cache()\ndiff --git a/src/accelerate/test_utils/scripts/test_distributed_data_loop.py b/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\nindex eaf7c6a34..6576e0358 100644\n--- a/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\n+++ b/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\n@@ -14,12 +14,25 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+\n+import warnings\n from typing import List\n+from unittest.mock import Mock\n \n import torch\n-from torch.utils.data import DataLoader, TensorDataset\n+from torch.utils.data import DataLoader, IterableDataset, TensorDataset\n \n from accelerate.accelerator import Accelerator\n+from accelerate.utils.dataclasses import DistributedType\n+\n+\n+class DummyIterableDataset(IterableDataset):\n+ def __init__(self, 
data):\n+ self.data = data\n+\n+ def __iter__(self):\n+ for element in self.data:\n+ yield element\n \n \n def create_accelerator(even_batches=True):\n@@ -28,11 +41,14 @@ def create_accelerator(even_batches=True):\n return accelerator\n \n \n-def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):\n \"\"\"\n Create a simple DataLoader to use during the test cases\n \"\"\"\n- dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\n+ if iterable:\n+ dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))\n+ else:\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\n \n dl = DataLoader(dataset, batch_size=batch_size)\n dl = accelerator.prepare(dl)\n@@ -103,7 +119,96 @@ def test_can_disable_even_batches():\n )\n \n \n-if __name__ == \"__main__\":\n+def test_can_join_uneven_inputs():\n+ accelerator = create_accelerator(even_batches=False)\n+\n+ model = torch.nn.Linear(1, 1)\n+ ddp_model = accelerator.prepare(model)\n+\n+ dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\n+\n+ batch_idxs = []\n+ with accelerator.join_uneven_inputs([ddp_model]):\n+ for batch_idx, batch in enumerate(dl):\n+ output = ddp_model(batch[0].float())\n+ loss = output.sum()\n+ loss.backward()\n+ batch_idxs.append(batch_idx)\n+\n+ accelerator.wait_for_everyone()\n+\n+ if accelerator.process_index == 0:\n+ assert batch_idxs == [0, 1]\n+ elif accelerator.process_index == 1:\n+ assert batch_idxs == [0]\n+\n+\n+def test_join_raises_warning_for_non_ddp_distributed(accelerator):\n+\n+ with warnings.catch_warnings(record=True) as w:\n+ with accelerator.join_uneven_inputs([Mock()]):\n+ pass\n+\n+ assert issubclass(w[-1].category, UserWarning)\n+ assert \"only supported for multi-GPU\" in str(w[-1].message)\n+\n+\n+def test_join_can_override_even_batches():\n+ default_even_batches = True\n+ overridden_even_batches = False\n+ accelerator = create_accelerator(even_batches=default_even_batches)\n+ model = torch.nn.Linear(1, 1)\n+ ddp_model = accelerator.prepare(model)\n+ train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\n+ valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\n+\n+ with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):\n+ train_dl_overridden_value = train_dl.batch_sampler.even_batches\n+ valid_dl_overridden_value = valid_dl.batch_sampler.even_batches\n+\n+ assert train_dl_overridden_value == overridden_even_batches\n+ assert valid_dl_overridden_value == overridden_even_batches\n+ assert train_dl.batch_sampler.even_batches == default_even_batches\n+ assert valid_dl.batch_sampler.even_batches == default_even_batches\n+\n+\n+def test_join_can_override_for_mixed_type_dataloaders():\n+ default_even_batches = True\n+ overridden_even_batches = False\n+ accelerator = create_accelerator(even_batches=default_even_batches)\n+ model = torch.nn.Linear(1, 1)\n+ ddp_model = accelerator.prepare(model)\n+ create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)\n+ batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\n+\n+ with warnings.catch_warnings():\n+ warnings.filterwarnings(\"ignore\")\n+ try:\n+ with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):\n+ batch_dl_overridden_value = batch_dl.batch_sampler.even_batches\n+ except AttributeError:\n+ # ensure attribute error is not raised when processing 
iterable dl\n+ raise AssertionError\n+\n+ assert batch_dl_overridden_value == overridden_even_batches\n+ assert batch_dl.batch_sampler.even_batches == default_even_batches\n+\n+\n+def test_join_raises_warning_for_iterable_when_overriding_even_batches():\n+ accelerator = create_accelerator()\n+ model = torch.nn.Linear(1, 1)\n+ ddp_model = accelerator.prepare(model)\n+ create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)\n+\n+ with warnings.catch_warnings(record=True) as w:\n+ with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\n+ pass\n+\n+ assert issubclass(w[-1].category, UserWarning)\n+ assert \"only supported for map-style datasets\" in str(w[-1].message)\n+\n+\n+def main():\n accelerator = create_accelerator()\n \n accelerator.print(\"Test that even_batches variable ensures uniform batches across processes\")\n@@ -111,3 +216,25 @@ def test_can_disable_even_batches():\n \n accelerator.print(\"Run tests with even_batches disabled\")\n test_can_disable_even_batches()\n+\n+ accelerator.print(\"Test joining uneven inputs\")\n+ test_can_join_uneven_inputs()\n+\n+ accelerator.print(\"Test overriding even_batches when joining uneven inputs\")\n+ test_join_can_override_even_batches()\n+\n+ accelerator.print(\"Test overriding even_batches for mixed dataloader types\")\n+ test_join_can_override_for_mixed_type_dataloaders()\n+\n+ accelerator.print(\"Test overriding even_batches raises a warning for iterable dataloaders\")\n+ test_join_raises_warning_for_iterable_when_overriding_even_batches()\n+\n+ accelerator.print(\"Test join with non DDP distributed raises warning\")\n+ original_state = accelerator.state.distributed_type\n+ accelerator.state.distributed_type = DistributedType.FSDP\n+ test_join_raises_warning_for_non_ddp_distributed(accelerator)\n+ accelerator.state.distributed_type = original_state\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/tests/test_accelerator.py b/tests/test_accelerator.py\nnew file mode 100644\nindex 000000000..19d6c1655\n--- /dev/null\n+++ b/tests/test_accelerator.py\n@@ -0,0 +1,51 @@\n+import unittest\n+\n+import torch\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate.accelerator import Accelerator\n+from accelerate.state import AcceleratorState\n+\n+\n+def create_components():\n+ model = torch.nn.Linear(2, 4)\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\n+ scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)\n+ train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))\n+ valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))\n+\n+ return model, optimizer, scheduler, train_dl, valid_dl\n+\n+\n+class AcceleratorTester(unittest.TestCase):\n+ def test_prepared_objects_are_referenced(self):\n+ accelerator = Accelerator()\n+ model, optimizer, scheduler, train_dl, valid_dl = create_components()\n+\n+ (\n+ prepared_model,\n+ prepared_optimizer,\n+ prepared_scheduler,\n+ prepared_train_dl,\n+ prepared_valid_dl,\n+ ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)\n+\n+ self.assertTrue(prepared_model in accelerator._models)\n+ self.assertTrue(prepared_optimizer in accelerator._optimizers)\n+ self.assertTrue(prepared_scheduler in accelerator._schedulers)\n+ self.assertTrue(prepared_train_dl in accelerator._dataloaders)\n+ self.assertTrue(prepared_valid_dl in accelerator._dataloaders)\n+ AcceleratorState._reset_state()\n+\n+ def test_free_memory_dereferences_prepared_components(self):\n+ accelerator 
= Accelerator()\n+ model, optimizer, scheduler, train_dl, valid_dl = create_components()\n+ accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)\n+\n+ accelerator.free_memory()\n+\n+ self.assertTrue(len(accelerator._models) == 0)\n+ self.assertTrue(len(accelerator._optimizers) == 0)\n+ self.assertTrue(len(accelerator._schedulers) == 0)\n+ self.assertTrue(len(accelerator._dataloaders) == 0)\n+ AcceleratorState._reset_state()\n", "code_comments": [ { "body": "Maybe only throw an error if there are no dataloader with that attribute? I can imagine having an iterable dataset for training but a standard dataset for evaluation.", "diff_hunk": "@@ -608,6 +612,41 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`.\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.NO:\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\n+ with contextlib.nullcontext(joinables):\n+ yield\n+\n+ elif self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ for dl in self._dataloaders:\n+ if not hasattr(dl, \"even_batches\"):\n+ raise ValueError(\"Overridding even_batches is not supported for iterable-style datasets\")", "from_author": false }, { "body": "I'd just issue a warning and exit.", "diff_hunk": "@@ -608,6 +612,41 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`.\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.NO:\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\n+ with contextlib.nullcontext(joinables):\n+ yield\n+\n+ elif self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ for dl in self._dataloaders:\n+ if not hasattr(dl, \"even_batches\"):\n+ raise ValueError(\"Overridding even_batches is not supported for iterable-style datasets\")\n+ dl_even_batches_values.append(dl.even_batches)\n+ dl.even_batches = even_batches\n+ else:\n+ even_batches = self.even_batches\n+\n+ enable_join = False if even_batches else True\n+ try:\n+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):\n+ yield\n+ finally:\n+ for dl, even_batches_value in zip(self._dataloaders, dl_even_batches_values):\n+ dl.even_batches = even_batches_value\n+ else:\n+ raise ValueError(\"Joining uneven inputs is only supported for DistributedDataParallel training\")", "from_author": false }, { "body": "This test will need two processes, so probably needs some decorator/special launcher.", "diff_hunk": "@@ -103,11 +117,78 @@ def test_can_disable_even_batches():\n )\n \n \n-if __name__ == \"__main__\":\n+def test_can_join_uneven_inputs():\n+ accelerator = 
create_accelerator(even_batches=False)\n+\n+ model = torch.nn.Linear(1, 1)\n+ ddp_model = accelerator.prepare(model)\n+\n+ dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\n+\n+ batch_idxs = []\n+ with accelerator.join_uneven_inputs([ddp_model]):\n+ for batch_idx, batch in enumerate(dl):\n+ output = ddp_model(batch[0].float())\n+ loss = output.sum()\n+ loss.backward()\n+ batch_idxs.append(batch_idx)\n+\n+ accelerator.wait_for_everyone()\n+\n+ if accelerator.process_index == 0:", "from_author": false }, { "body": "You are correct. I think that this should be taken care of here, from my previous PR: https://github.com/huggingface/accelerate/blob/main/tests/test_multigpu.py#L49", "diff_hunk": "@@ -103,11 +117,78 @@ def test_can_disable_even_batches():\n )\n \n \n-if __name__ == \"__main__\":\n+def test_can_join_uneven_inputs():\n+ accelerator = create_accelerator(even_batches=False)\n+\n+ model = torch.nn.Linear(1, 1)\n+ ddp_model = accelerator.prepare(model)\n+\n+ dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\n+\n+ batch_idxs = []\n+ with accelerator.join_uneven_inputs([ddp_model]):\n+ for batch_idx, batch in enumerate(dl):\n+ output = ddp_model(batch[0].float())\n+ loss = output.sum()\n+ loss.backward()\n+ batch_idxs.append(batch_idx)\n+\n+ accelerator.wait_for_everyone()\n+\n+ if accelerator.process_index == 0:", "from_author": true }, { "body": "Good spot. Based on the other feedback, I have just raised a warning for this rather than an exception", "diff_hunk": "@@ -608,6 +612,41 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`.\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.NO:\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\n+ with contextlib.nullcontext(joinables):\n+ yield\n+\n+ elif self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ for dl in self._dataloaders:\n+ if not hasattr(dl, \"even_batches\"):\n+ raise ValueError(\"Overridding even_batches is not supported for iterable-style datasets\")", "from_author": true }, { "body": "Done", "diff_hunk": "@@ -608,6 +612,41 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training on uneven inputs, which acts as a wrapper around `torch.distributed.algorithms.join`.\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.NO:\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\n+ with contextlib.nullcontext(joinables):\n+ yield\n+\n+ elif self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ for dl in self._dataloaders:\n+ if not hasattr(dl, \"even_batches\"):\n+ raise ValueError(\"Overridding even_batches is not supported for iterable-style 
datasets\")\n+ dl_even_batches_values.append(dl.even_batches)\n+ dl.even_batches = even_batches\n+ else:\n+ even_batches = self.even_batches\n+\n+ enable_join = False if even_batches else True\n+ try:\n+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):\n+ yield\n+ finally:\n+ for dl, even_batches_value in zip(self._dataloaders, dl_even_batches_values):\n+ dl.even_batches = even_batches_value\n+ else:\n+ raise ValueError(\"Joining uneven inputs is only supported for DistributedDataParallel training\")", "from_author": true }, { "body": "Let's add a warning here maybe? Longterm, `DataLoaderDispatcher` will also support the `even_batches` kwarg.", "diff_hunk": "@@ -608,6 +612,91 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\n+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the\n+ length of the dataset.\n+\n+ Args:\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\n+ even_batches (`bool`, *optional*)\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\n+ the default `Accelerator` value wil be used.\n+\n+ <Tip warning={true}>\n+\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\n+ configuration, this method will have no effect.\n+\n+ </Tip>\n+\n+ <Tip warning={true}>\n+\n+ Overidding `even_batches` will not affect iterable-style data loaders.\n+\n+ </Tip>\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator(even_batches=True)\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\n+\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\n+ ... for input, output in dataloader:\n+ ... outputs = model(input)\n+ ... loss = loss_func(outputs)\n+ ... loss.backward()\n+ ... optimizer.step()\n+ ... optimizer.zero_grad()\n+ ```\n+\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ # override value in batch sampler for map-style datasets\n+ for dl_idx, dl in enumerate(self._dataloaders):\n+ if isinstance(dl, DataLoaderDispatcher):\n+ continue", "from_author": false }, { "body": "```suggestion\r\n \"Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect.\"\r\n```", "diff_hunk": "@@ -608,6 +612,91 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\n+ around `torch.distributed.algorithms.join`. 
This is useful when the total batch size does not evenly divide the\n+ length of the dataset.\n+\n+ Args:\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\n+ even_batches (`bool`, *optional*)\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\n+ the default `Accelerator` value wil be used.\n+\n+ <Tip warning={true}>\n+\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\n+ configuration, this method will have no effect.\n+\n+ </Tip>\n+\n+ <Tip warning={true}>\n+\n+ Overidding `even_batches` will not affect iterable-style data loaders.\n+\n+ </Tip>\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator(even_batches=True)\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\n+\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\n+ ... for input, output in dataloader:\n+ ... outputs = model(input)\n+ ... loss = loss_func(outputs)\n+ ... loss.backward()\n+ ... optimizer.step()\n+ ... optimizer.zero_grad()\n+ ```\n+\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ # override value in batch sampler for map-style datasets\n+ for dl_idx, dl in enumerate(self._dataloaders):\n+ if isinstance(dl, DataLoaderDispatcher):\n+ continue\n+ dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))\n+ dl.batch_sampler.even_batches = even_batches\n+\n+ if len(dl_even_batches_values) == 0:\n+ warnings.warn(\n+ \"Overridding even_batches is only supported for map-style datasets, yet all dataloaders given were iterable\"\n+ )\n+ else:\n+ even_batches = self.even_batches\n+\n+ enable_join = False if even_batches else True\n+ try:\n+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):\n+ yield\n+ finally:\n+ # reset any batch samplers that have been modified\n+ for dl_idx, even_batches_value in dl_even_batches_values:\n+ self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value\n+ else:\n+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs\n+ if self.distributed_type != DistributedType.NO:\n+ warnings.warn(\n+ \"Joining uneven inputs is only supported for DistributedDataParallel training, join_unenven_inputs has no effect.\"", "from_author": false }, { "body": "Sure, I could change the condition on the warning that occurs if only `DataLoaderDispacher`s are provided, so that it warns if at least `DataLoaderDispacher` is seen. ", "diff_hunk": "@@ -608,6 +612,91 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\n+ around `torch.distributed.algorithms.join`. 
This is useful when the total batch size does not evenly divide the\n+ length of the dataset.\n+\n+ Args:\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\n+ even_batches (`bool`, *optional*)\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\n+ the default `Accelerator` value wil be used.\n+\n+ <Tip warning={true}>\n+\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\n+ configuration, this method will have no effect.\n+\n+ </Tip>\n+\n+ <Tip warning={true}>\n+\n+ Overidding `even_batches` will not affect iterable-style data loaders.\n+\n+ </Tip>\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator(even_batches=True)\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\n+\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\n+ ... for input, output in dataloader:\n+ ... outputs = model(input)\n+ ... loss = loss_func(outputs)\n+ ... loss.backward()\n+ ... optimizer.step()\n+ ... optimizer.zero_grad()\n+ ```\n+\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ # override value in batch sampler for map-style datasets\n+ for dl_idx, dl in enumerate(self._dataloaders):\n+ if isinstance(dl, DataLoaderDispatcher):\n+ continue", "from_author": true }, { "body": "I have made the change now", "diff_hunk": "@@ -608,6 +612,91 @@ def accumulate(self, model):\n with context(model):\n yield\n \n+ @contextmanager\n+ def join_uneven_inputs(self, joinables, even_batches=None):\n+ \"\"\"\n+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper\n+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the\n+ length of the dataset.\n+\n+ Args:\n+ joinables (`List[torch.distributed.algorithms.Joinable]`):\n+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a\n+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.\n+ even_batches (`bool`, *optional*)\n+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,\n+ the default `Accelerator` value wil be used.\n+\n+ <Tip warning={true}>\n+\n+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other\n+ configuration, this method will have no effect.\n+\n+ </Tip>\n+\n+ <Tip warning={true}>\n+\n+ Overidding `even_batches` will not affect iterable-style data loaders.\n+\n+ </Tip>\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator(even_batches=True)\n+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)\n+\n+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):\n+ ... for input, output in dataloader:\n+ ... outputs = model(input)\n+ ... loss = loss_func(outputs)\n+ ... 
loss.backward()\n+ ... optimizer.step()\n+ ... optimizer.zero_grad()\n+ ```\n+\n+ \"\"\"\n+ if is_torch_version(\"<\", \"1.10.0\"):\n+ raise ValueError(f\"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.\")\n+\n+ if self.distributed_type == DistributedType.MULTI_GPU:\n+ dl_even_batches_values = []\n+\n+ if even_batches is not None:\n+ # override value in batch sampler for map-style datasets\n+ for dl_idx, dl in enumerate(self._dataloaders):\n+ if isinstance(dl, DataLoaderDispatcher):\n+ continue", "from_author": true } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_820). All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "@sgugger @muellerzr Is this the sort of thing that you had in mind? If so, I can write some docs and perhaps a small example.\r\n\r\nOne thing that I was thinking about whilst working on this though is that, although uneven batches is likely only going to be needed during evaluation, the approach that we have taken with setting a variable at the accelerator level is that it will apply to both training and validation. So an example would look something like:\r\n\r\n```\r\naccelerator = Accelerator(even_batches=True)\r\n\r\nmodel, optimizer, train_dataloader, val_dataloader = create_components()\r\n\r\nddp_model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)\r\n\r\n# training loop as normal\r\n...\r\n\r\n\r\naccelerator.even_batches = False\r\n\r\nval_dataloader = accelerator.prepare(val_dataloader)\r\n\r\n# run eval loop\r\nwith accelerator.join_uneven_inputs([ddp_model]):\r\n ... \r\n```\r\nIt's a bit clunky, but as it is an advanced use case, it may be acceptable.\r\n\r\nI'm interested to hear any thoughts that you have!", "from_author": true }, { "body": "@Chris-hughes10 @sgugger what do you think about something like this. Here's an MVP example showing the basic behavior:\r\n\r\n```python\r\nfrom contextlib import contextmanager\r\n\r\nclass A:\r\n \"A mock version of what we would call the dataloader\"\r\n def __init__(self, a:int):\r\n self.a = a\r\n\r\nclass B:\r\n \"A mock version of what we would call the Accelerator\"\r\n @contextmanager\r\n def do_the_thing(self, a):\r\n old_a = a.a\r\n a.a = 2\r\n try:\r\n yield\r\n finally:\r\n a.a = old_a\r\n\r\na = A(1)\r\nb = B()\r\n\r\nwith b.do_the_thing(a):\r\n print(a.a)\r\nprint(a.a)\r\n```\r\nYou'd find that `a.a` prints 2 in the first and 1 in the second. We can apply that similarly here since we expect if the user is calling this that they will be training/evaluating/doing things on uneven inputs, leading to an API such as:\r\n\r\n```python\r\nwith accelerator.join_uneven_inputs([model], dataloader=eval_dataloader):\r\n ...\r\n```\r\nInside this ctx manager `a.a` would be `eval_dataloader.even_batches`, and if it wasn't disabled originally we can print for the user that using this context manager will temporarily change this value or something. This let's us get rid of the explicit of setting in the accelerator and having to prepare later, so the only code change is the context manager. \r\n\r\nLmk what you both think\r\n\r\nOtherwise I think you've done a great job with it! 
Super small API design nit of my own πŸ˜„ \r\n\r\n(specifically this would do `eval_dataloader.even_batches=False` during the `a.a` bits in the context manager example)\r\n\r\nIt could probably go as far as accept a tuple of models and dataloaders and Accelerate could figure out which one is which and apply them rather than having to pass in a list πŸ€” Decisions decisions :) \r\n\r\nIf wanted I could do some of this in a follow up PR, as this PR is great already", "from_author": false }, { "body": "Agreed, maybe remove some of the magic with an `override` arg as well (that defaults to `False`) and a very explicit docstring as middle ground. (Also would love others opinions πŸ˜„ )", "from_author": false }, { "body": "Hi both, I have given this some thought and have a couple of ideas. \r\n\r\nMy first reaction was to agree with @sgugger, changing the `even_batches` value in the dataloader seems a little too magical, but it definitely makes the particular use case smoother. I think the main irk that I have with this is that it is changing a value on an unrelated object; if it was changing the value on the accelerator itself, I think it would be quite clean. Additionally, it seems a bit like a side effect, as it isn't really part of the same operation as the join.\r\n\r\nOne thought that I had, is that we could expose a method on the dataloader to make it easy to toggle this behaviour. Perhaps something like:\r\n```\r\naccelerator = Accelerator()\r\n\r\nmodel, optimizer, train_dataloader, val_dataloader = create_components()\r\n\r\nddp_model, optimizer, train_dataloader, val_dataloader = accelerator.prepare(model, optimizer, train_dataloader, val_dataloader)\r\n\r\n# training loop as normal\r\n...\r\n\r\n\r\nval_dataloader.ensure_even_batches(enabled=False)\r\n\r\n# run eval loop\r\nwith accelerator.join_uneven_inputs([ddp_model]):\r\n ... \r\n```\r\nThere is still an extra step, but then it is very explicit what is going on. Conversely, this does mean that the flag in the accelerator is a little redundant. Also, it probably means that, in most cases, setting this flag and the context manager will have to be used together.\r\n\r\nAs an alternative, if we would like to keep everything accelerator-centric, we could potentially have the accelerator maintain a reference to any prepared dataloaders. This way, if we change the setting on the accelerator, we could change the value on all dataloaders. This could look something like:\r\n\r\n```\r\naccelerator = Accelerator(even_batches=True)\r\n\r\nmodel, optimizer, train_dataloader, val_dataloader = create_components()\r\n\r\nddp_model, optimizer, train_dataloader, val_dataloader = accelerator.prepare(model, optimizer, train_dataloader, val_dataloader)\r\n\r\n# training loop as normal\r\n...\r\n\r\n# run eval loop\r\nwith accelerator.join_uneven_inputs([ddp_model], even_batches=False):\r\n ... \r\n```\r\nPersonally, I think that this is quite clean, but it does mean adding additional logic to the accelerator, and perhaps the actual difference between this solution and the one proposed by @muellerzr is a bit academic.\r\n\r\nEDIT: cc @pacman100 , I think we commented essentially at the same time. 
What do you think about the second approach proposed here?", "from_author": true }, { "body": "Yes, I like the second approach proposed above ✨", "from_author": false }, { "body": "@Chris-hughes10 I too like that solution, as it keeps in line with how we design bits like this such as the gradient accumulation wrapper πŸ˜„ It also keeps the dataloader preparation the same as optimizer and model in the sense of pointers. This just means that there would also need to be a change to `accelerator.free_memory()` to also free the dataloader references. \r\n\r\nI also think it should just be an `Accelerator` only attribute, there wouldn't be a need to put this in `AcceleratorState` since we deem that too magical already πŸ˜ƒ ", "from_author": false }, { "body": "Works for me too!", "from_author": false }, { "body": "It looks like there are failing tests that are unrelated to this PR. Perhaps https://github.com/huggingface/accelerate/pull/825 will solve the problem.", "from_author": true }, { "body": "@Chris-hughes10 correct, you can ignore the \"checkpoint\" and \"rest\" failures here. ", "from_author": false }, { "body": "I have made some more updates, so I think it should be feature complete now.\r\n\r\nUnfortunately, I am getting a couple of failing tests on the CI, which look like they occur in the bf16 section of the test script, comparing training on CPU and GPU. I am unable to reproduce locally as I don't have a device with bf16 available, and the rest of the tests in that script seem to pass. \r\n\r\nIt is quite strange though, because looking at what it is failing, it doesn't really interact with anything in this PR other than the accelerator maintaining references to the dataloader; which doesn't seem to be a problem for the other training configurations in the script. After noticing that the accelerator state is cleared multiple times in the test - which does not include the dataloaders, as discussed above - I also tried adding resetting the accelerator manually before the bf16 section, using `accelerator.free_memory`, but this did not make any difference. \r\n\r\nHonesty, I am stumped at this point. Do you guys have any ideas on what could be the issue?", "from_author": true }, { "body": "@Chris-hughes10 I'll be able to dig into this further tomorrow and see if I can't root out what's going on, so don't stress on it today πŸ˜ƒ ", "from_author": false }, { "body": "Hello @Chris-hughes10 , I went over the PR and tests. I found the cause of the tests failing. In the below screenshot please find the changes required in `test_accelerator.py` to resolve failing tests.\r\n\r\n<img width=\"764\" alt=\"Screenshot 2022-11-10 at 7 28 32 PM\" src=\"https://user-images.githubusercontent.com/13534540/201111269-367a3afa-5957-4b3e-8288-bbdd11107393.png\">\r\n", "from_author": false }, { "body": "> Hello @Chris-hughes10 , I went over the PR and tests. I found the cause of the tests failing. In the below screenshot please find the changes required in `test_accelerator.py` to resolve failing tests.\r\n> \r\n\r\n\r\nAwesome, thanks for this! I didn't anticipate that the state would carry over outside of each test, but I suppose it makes sense for a singleton. I will make the change.", "from_author": true }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_820). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false }, { "body": "There seems to be a problem with building the PR documentation, is this something that I can fix?", "from_author": true }, { "body": "@Chris-hughes10 nope don't worry about that, it's a problem on our side :)", "from_author": false }, { "body": "Thanks a bunch @Chris-hughes10, excellent work! I'll be keeping an eye on our CI that runs multi-gpu tests post this merge to see if anything is up and I'll let you know if there is and I can't seem to figure out what may be failing quickly :) ", "from_author": false }, { "body": "Thanks for helping to get this merged @muellerzr @sgugger @pacman100! ", "from_author": true }, { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_820). All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/820", "pr_id": 1110563062 }, { "diff": "diff --git a/docs/source/usage_guides/megatron_lm.mdx b/docs/source/usage_guides/megatron_lm.mdx\nindex d26ecd4b5..23b024f75 100644\n--- a/docs/source/usage_guides/megatron_lm.mdx\n+++ b/docs/source/usage_guides/megatron_lm.mdx\n@@ -435,6 +435,103 @@ python checkpoint_utils/megatgron_gpt2/checkpoint_reshaping_and_interoperability\n --print-checkpoint-structure\n ```\n \n+## Megatron-LM GPT models support returning logits and `megatron_generate` function for text generation\n+\n+1. Returning logits require setting `require_logits=True` in MegatronLMPlugin as shown below. \n+These would be available on the in the last stage of pipeline.\n+```python\n+megatron_lm_plugin = MegatronLMPlugin(return_logits=True)\n+```\n+\n+2. `megatron_generate` method for Megatron-LM GPT model: This will use Tensor and Pipeline Parallelism to complete \n+generations for a batch of inputs when using greedy with/without top_k/top_p sampling and for individual prompt inputs when using beam search decoding. \n+Only a subset of features of transformers generate is supported. This will help in using large models via tensor and pipeline parallelism \n+for generation (already does key-value caching and uses fused kernels by default).\n+This requires data parallel size to be 1, sequence parallelism and activation checkpointing to be disabled.\n+It also requires specifying path to tokenizer's vocab file and merges file. 
\n+Below example shows how to configure and use `megatron_generate` method for Megatron-LM GPT model.\n+```python\n+# specifying tokenizer's vocab and merges file\n+vocab_file = os.path.join(args.resume_from_checkpoint, \"vocab.json\")\n+merge_file = os.path.join(args.resume_from_checkpoint, \"merges.txt\")\n+other_megatron_args = {\"vocab_file\": vocab_file, \"merge_file\": merge_file}\n+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)\n+\n+# inference using `megatron_generate` functionality\n+tokenizer.pad_token = tokenizer.eos_token\n+max_new_tokens = 64\n+batch_texts = [\n+ \"Are you human?\",\n+ \"The purpose of life is\",\n+ \"The arsenal was constructed at the request of\",\n+ \"How are you doing these days?\",\n+]\n+batch_encodings = tokenizer(batch_texts, return_tensors=\"pt\", padding=True)\n+\n+# top-p sampling\n+generated_tokens = model.megatron_generate(\n+ batch_encodings[\"input_ids\"],\n+ batch_encodings[\"attention_mask\"],\n+ max_new_tokens=max_new_tokens,\n+ top_p=0.8,\n+ top_p_decay=0.5,\n+ temperature=0.9,\n+)\n+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())\n+accelerator.print(decoded_preds)\n+\n+# top-k sampling\n+generated_tokens = model.megatron_generate(\n+ batch_encodings[\"input_ids\"],\n+ batch_encodings[\"attention_mask\"],\n+ max_new_tokens=max_new_tokens,\n+ top_k=50,\n+ temperature=0.9,\n+)\n+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())\n+accelerator.print(decoded_preds)\n+\n+# adding `bos` token at the start\n+generated_tokens = model.megatron_generate(\n+ batch_encodings[\"input_ids\"], batch_encodings[\"attention_mask\"], max_new_tokens=max_new_tokens, add_BOS=True\n+)\n+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())\n+accelerator.print(decoded_preds)\n+\n+# beam search => only takes single prompt\n+batch_texts = [\"The purpose of life is\"]\n+batch_encodings = tokenizer(batch_texts, return_tensors=\"pt\", padding=True)\n+generated_tokens = model.megatron_generate(\n+ batch_encodings[\"input_ids\"],\n+ batch_encodings[\"attention_mask\"],\n+ max_new_tokens=max_new_tokens,\n+ num_beams=20,\n+ length_penalty=1.5,\n+)\n+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())\n+accelerator.print(decoded_preds)\n+```\n+\n+3. An end-to-end example of using `megatron_generate` method for Megatron-LM GPT model is available at\n+[megatron_gpt2_generation.py](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/inference/megatron_gpt2_generation.py) with \n+config file [megatron_lm_gpt_generate_config.yaml](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/Configs/megatron_lm_gpt_generate_config.yaml).\n+The bash script with accelerate launch command is available at [megatron_lm_gpt_generate.sh](https://github.com/pacman100/accelerate-megatron-test/blob/main/megatron_lm_gpt_generate.sh).\n+The output logs of the script are available at [megatron_lm_gpt_generate.log](https://github.com/pacman100/accelerate-megatron-test/blob/main/output_logs/megatron_lm_gpt_generate.log).\n+\n+## Support for ROPE and ALiBi Positional embeddings and Multi-Query Attention\n+\n+1. For ROPE/ALiBi attention, pass `position_embedding_type` with `(\"absolute\" | \"rotary\" | \"alibi\")` to `MegatronLMPlugin` as shown below.\n+```python\n+other_megatron_args = {\"position_embedding_type\": \"alibi\"}\n+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)\n+```\n+\n+2. 
For Multi-Query Attention, pass `attention_head_type` with `(\"multihead\" | \"multiquery\")` to `MegatronLMPlugin` as shown below.\n+```python\n+other_megatron_args = {\"attention_head_type\": \"multiquery\"}\n+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)\n+```\n+\n ## Caveats\n \n 1. Supports Transformers GPT2, Megatron-BERT and T5 models.\n@@ -445,8 +542,12 @@ there is quite complex interplay of pipeline, tensor and data parallelsim behind\n The `model(**batch_data)` call return loss(es) averaged across the data parallel ranks.\n This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and\n you can easily compute the `perplexity` using the loss. \n+For GPT model, returning logits in addition to loss(es) is supported. \n+These logits aren't gathered across data prallel ranks. Use `accelerator.utils.gather_across_data_parallel_groups`\n+to gather logits across data parallel ranks. These logits along with labels can be used for computing various \n+performance metrics. \n \n-3. The main process is the last rank as the losses are available in the last stage of pipeline.\n+3. The main process is the last rank as the losses/logits are available in the last stage of pipeline.\n `accelerator.is_main_process` and `accelerator.is_local_main_process` return `True` for last rank when using \n Megatron-LM integration.\n \ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex b472ec556..0e67daf4b 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -107,6 +107,7 @@\n MegatronLMSchedulerWrapper,\n T5TrainStep,\n avg_losses_across_data_parallel_group,\n+ gather_across_data_parallel_groups,\n )\n from .megatron_lm import initialize as megatron_lm_initialize\n from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 8227fa5bc..5fcb5e68f 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -771,6 +771,18 @@ class MegatronLMPlugin:\n default=False,\n metadata={\"help\": \"Whether to set all logging options.\"},\n )\n+ eval_iters: int = field(\n+ default=100, metadata={\"help\": \"Number of iterations to run for evaluation validation/test for.\"}\n+ )\n+ eval_interval: int = field(\n+ default=1000, metadata={\"help\": \"Interval between running evaluation on validation set.\"}\n+ )\n+ return_logits: bool = field(\n+ default=False,\n+ metadata={\"help\": \"Whether to return logits from the model.\"},\n+ )\n+\n+ # custom train step args\n custom_train_step_class: Optional[Any] = field(\n default=None,\n metadata={\"help\": \"Custom train step class.\"},\n@@ -779,11 +791,22 @@ class MegatronLMPlugin:\n default=None,\n metadata={\"help\": \"Custom train step kwargs.\"},\n )\n- eval_iters: int = field(\n- default=100, metadata={\"help\": \"Number of iterations to run for evaluation validation/test for.\"}\n+\n+ # custom model args\n+ custom_model_provider_function: Optional[Callable] = field(\n+ default=None,\n+ metadata={\"help\": \"Custom model provider function.\"},\n )\n- eval_interval: int = field(\n- default=1000, metadata={\"help\": \"Interval between running evaluation on validation set.\"}\n+ custom_prepare_model_function: Optional[Callable] = field(\n+ default=None,\n+ metadata={\"help\": \"Custom prepare model function.\"},\n+ )\n+\n+ # remaining args such as enabling Alibi/ROPE positional embeddings,\n+ # 
wandb logging, Multi-Query Attention, etc.\n+ other_megatron_args: Optional[Dict[str, Any]] = field(\n+ default=None,\n+ metadata={\"help\": \"Other Megatron-LM arguments. Please refer Megatron-LM\"},\n )\n \n def __post_init__(self):\n@@ -840,6 +863,8 @@ def __post_init__(self):\n self.megatron_lm_default_args[\"tensorboard_dir\"] = self.tensorboard_dir\n if self.set_all_logging_options:\n self.set_tensorboard_logging_options()\n+ if self.other_megatron_args is not None:\n+ self.megatron_lm_default_args.update(self.other_megatron_args)\n \n def set_network_size_args(self, model, batch_data=None):\n # Check if the model is either BERT, GPT or T5 else raise error\n@@ -884,6 +909,8 @@ def set_network_size_args(self, model, batch_data=None):\n else:\n self.seq_length = max_position_embeddings\n self.megatron_lm_default_args[\"seq_length\"] = self.seq_length\n+ self.megatron_lm_default_args[\"return_logits\"] = self.return_logits\n+ self.megatron_lm_default_args[\"tokenizer_type\"] = \"GPT2BPETokenizer\"\n elif \"t5\" in model.config.model_type.lower():\n model_type_name = \"t5\"\n num_layers = model.config.num_layers\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex bdb53988a..85e20a746 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -13,7 +13,9 @@\n # limitations under the License.\n \n import importlib\n+import os\n import sys\n+from distutils.util import strtobool\n from functools import lru_cache\n \n import torch\n@@ -90,10 +92,11 @@ def is_bf16_available(ignore_tpu=False):\n \n \n def is_megatron_lm_available():\n- package_exists = importlib.util.find_spec(\"megatron\") is not None\n- if package_exists:\n- megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\n- return compare_versions(megatron_version, \">=\", \"2.2.0\")\n+ if strtobool(os.environ.get(\"USE_MEGATRON_LM\", \"False\")) == 1:\n+ package_exists = importlib.util.find_spec(\"megatron\") is not None\n+ if package_exists:\n+ megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\n+ return compare_versions(megatron_version, \">=\", \"2.2.0\")\n return False\n \n \ndiff --git a/src/accelerate/utils/megatron_lm.py b/src/accelerate/utils/megatron_lm.py\nindex cb988e360..afee2fcca 100644\n--- a/src/accelerate/utils/megatron_lm.py\n+++ b/src/accelerate/utils/megatron_lm.py\n@@ -20,11 +20,12 @@\n import torch\n import torch.nn.functional as F\n from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n+from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP\n \n from ..optimizer import AcceleratedOptimizer\n from ..scheduler import AcceleratedScheduler\n from .imports import is_megatron_lm_available, is_transformers_available\n-from .operations import send_to_device\n+from .operations import recursively_apply, send_to_device\n \n \n if is_transformers_available():\n@@ -41,6 +42,7 @@\n get_num_microbatches,\n get_tensorboard_writer,\n get_timers,\n+ get_tokenizer,\n mpu,\n print_rank_0,\n print_rank_last,\n@@ -56,16 +58,24 @@\n set_jit_fusion_options,\n write_args_to_tensorboard,\n )\n- from megatron.model import BertModel, GPTModel, ModelType, T5Model\n+ from megatron.model import BertModel\n+ from megatron.model import DistributedDataParallel as LocalDDP\n+ from megatron.model import Float16Module, GPTModel, ModelType, T5Model\n from megatron.model.classification import Classification\n from megatron.optimizer import get_megatron_optimizer\n from megatron.schedules import 
get_forward_backward_func\n+ from megatron.text_generation.communication import broadcast_int_list, broadcast_tensor\n+ from megatron.text_generation.generation import (\n+ beam_search_and_return_on_first_stage,\n+ generate_tokens_probs_and_return_on_first_stage,\n+ )\n from megatron.tokenizer.tokenizer import _vocab_size_with_padding\n from megatron.training import get_model, get_optimizer_param_scheduler, training_log\n from megatron.utils import (\n average_losses_across_data_parallel_group,\n calc_params_l2_norm,\n get_ltor_masks_and_position_ids,\n+ unwrap_model,\n )\n \n \n@@ -114,13 +124,21 @@ def model_provider_func(pre_process=True, post_process=True, add_encoder=True, a\n def prepare_model(accelerator):\n accelerator.print(\"Preparing model\")\n args = get_args()\n- if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\n- model_type = ModelType.encoder_or_decoder\n- elif args.model_type_name == \"t5\":\n- model_type = ModelType.encoder_and_decoder\n- if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1:\n- args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2\n- model = get_model(model_provider_func, model_type)\n+ if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None:\n+ if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None:\n+ raise ValueError(\n+ \"You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`.\"\n+ )\n+ custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function\n+ model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func)\n+ else:\n+ if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\n+ model_type = ModelType.encoder_or_decoder\n+ elif args.model_type_name == \"t5\":\n+ model_type = ModelType.encoder_and_decoder\n+ if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1:\n+ args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2\n+ model = get_model(model_provider_func, model_type)\n return model\n \n \n@@ -593,6 +611,12 @@ def __init__(self, args):\n self.loss_func = self.get_loss_func()\n self.forward_step = self.get_forward_step_func()\n self.eod_token = args.padded_vocab_size - 1\n+ if args.vocab_file is not None:\n+ tokenizer = get_tokenizer()\n+ self.eod_token = tokenizer.eod\n+ self.reset_position_ids = args.reset_position_ids\n+ self.reset_attention_mask = args.reset_attention_mask\n+ self.eod_mask_loss = args.eod_mask_loss\n if not args.model_return_dict:\n self.model_output_class = None\n else:\n@@ -601,8 +625,6 @@ def __init__(self, args):\n def get_batch_func(self, megatron_dataset_flag):\n def get_batch_megatron(data_iterator):\n \"\"\"Generate a batch\"\"\"\n- args = get_args()\n-\n # Items and their type.\n keys = [\"text\"]\n datatype = torch.int64\n@@ -621,7 +643,7 @@ def get_batch_megatron(data_iterator):\n \n # Get the masks and postition ids.\n attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(\n- tokens, self.eod_token, args.reset_position_ids, args.reset_attention_mask, args.eod_mask_loss\n+ tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss\n )\n \n return tokens, labels, loss_mask, attention_mask, position_ids\n@@ -638,7 +660,7 @@ def get_batch_transformer(data_iterator):\n tokens = tokens_[:, :-1].contiguous()\n # Get the masks and 
postition ids.\n attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(\n- tokens, self.eod_token, False, False, True\n+ tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True\n )\n return tokens, labels, loss_mask, attention_mask, position_ids\n \n@@ -648,15 +670,24 @@ def get_batch_transformer(data_iterator):\n return get_batch_transformer\n \n def get_loss_func(self):\n+ args = get_args()\n+\n def loss_func(loss_mask, output_tensor):\n- losses = output_tensor.float()\n+ if args.return_logits:\n+ losses, logits = output_tensor\n+ else:\n+ losses = output_tensor\n+ losses = losses.float()\n loss_mask = loss_mask.view(-1).float()\n loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()\n \n # Reduce loss for logging.\n averaged_loss = average_losses_across_data_parallel_group([loss])\n \n- return loss, {\"lm loss\": averaged_loss[0]}\n+ output_dict = {\"lm loss\": averaged_loss[0]}\n+ if args.return_logits:\n+ output_dict.update({\"logits\": logits})\n+ return loss, output_dict\n \n return loss_func\n \n@@ -1031,7 +1062,10 @@ def train_step(self, **batch_data):\n loss_reduced = {}\n for key in losses_reduced[0]:\n losses_reduced_for_key = [x[key] for x in losses_reduced]\n- loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\n+ if len(losses_reduced_for_key[0].shape) == 0:\n+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\n+ else:\n+ loss_reduced[key] = torch.concat(losses_reduced_for_key)\n return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad\n return {}, skipped_iter, grad_norm, num_zeros_in_grad\n \n@@ -1079,7 +1113,10 @@ def eval_step(self, **batch_data):\n loss_reduced = {}\n for key in loss_dicts[0]:\n losses_reduced_for_key = [x[key] for x in loss_dicts]\n- loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\n+ if len(losses_reduced_for_key[0].shape) == 0:\n+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\n+ else:\n+ loss_reduced[key] = torch.concat(losses_reduced_for_key)\n return loss_reduced\n else:\n return {}\n@@ -1134,10 +1171,15 @@ def forward(self, **batch_data):\n \n loss = torch.tensor(0.0, device=args.local_rank)\n for key in loss_dict:\n- loss += loss_dict[key]\n+ if len(loss_dict[key].shape) == 0:\n+ loss += loss_dict[key]\n+\n+ logits = None\n+ if \"logits\" in loss_dict:\n+ logits = loss_dict[\"logits\"]\n # loss = reduce(loss)\n if self.train_step_handler.model_output_class is not None:\n- return self.train_step_handler.model_output_class(loss=loss)\n+ return self.train_step_handler.model_output_class(loss=loss, logits=logits)\n return loss\n \n def log_eval_results(self):\n@@ -1186,6 +1228,184 @@ def load_checkpoint(self, input_dir):\n if args.fp16 and self.iteration == 0:\n self.optimizer.reload_model_params()\n \n+ def megatron_generate(\n+ self,\n+ inputs,\n+ attention_mask=None,\n+ max_length=None,\n+ max_new_tokens=None,\n+ num_beams=None,\n+ temperature=None,\n+ top_k=None,\n+ top_p=None,\n+ length_penalty=None,\n+ **kwargs,\n+ ):\n+ \"\"\"\n+ Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along\n+ with sampling. Refer the Megatron-LM repo for more details\n+\n+ Args:\n+ inputs (torch.Tensor): input ids\n+ attention_mask (torch.Tensor, optional): attention mask. Defaults to None.\n+ max_length (int, optional): max length of the generated sequence. 
Defaults to None.\n+ Either this or max_new_tokens should be provided.\n+ max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None.\n+ Either this or max_length should be provided.\n+ num_beams (int, optional): number of beams to use for beam search. Defaults to None.\n+ temperature (float, optional): temperature for sampling. Defaults to 1.0.\n+ top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.0.\n+ top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0.\n+ length_penalty (float, optional): length penalty for beam search. Defaults to None.\n+ kwargs: additional key-value arguments\n+ \"\"\"\n+\n+ # checking if required arguments are passed\n+ args = get_args()\n+ if args.model_type_name != \"gpt\":\n+ raise NotImplementedError(\"Generate method is not implemented for this model\")\n+\n+ if args.data_parallel_size > 1:\n+ raise ValueError(\"Generate method requires data parallelism to be 1\")\n+\n+ if args.sequence_parallel:\n+ raise ValueError(\"Generate method requires sequence parallelism to be False\")\n+\n+ if args.recompute_granularity is not None:\n+ raise ValueError(\"Checkpoint activations cannot be set for inference\")\n+\n+ if args.vocab_file is None:\n+ raise ValueError(\"Vocab file is required for inference\")\n+\n+ # Prepare inputs\n+ if max_length is None and max_new_tokens is None:\n+ raise ValueError(\"`max_length` or `max_new_tokens` are required for inference\")\n+\n+ if temperature is None:\n+ temperature = 1.0\n+ elif not (0.0 < temperature <= 100.0):\n+ raise ValueError(\"temperature must be a positive number less than or equal to 100.0\")\n+\n+ if top_k is None:\n+ top_k = 0\n+ elif not (0 <= top_k <= 1000):\n+ raise ValueError(\"top_k must be a positive number less than or equal to 1000\")\n+\n+ if top_p is None:\n+ top_p = 0.0\n+ elif top_p > 0.0 and top_k > 0.0:\n+ raise ValueError(\"top_p and top_k sampling cannot be set together\")\n+ else:\n+ if not (0.0 <= top_p <= 1.0):\n+ raise ValueError(\"top_p must be less than or equal to 1.0\")\n+\n+ top_p_decay = kwargs.get(\"top_p_decay\", 0.0)\n+ if not (0.0 <= top_p_decay <= 1.0):\n+ raise ValueError(\"top_p_decay must be less than or equal to 1.0\")\n+\n+ top_p_bound = kwargs.get(\"top_p_bound\", 0.0)\n+ if not (0.0 <= top_p_bound <= 1.0):\n+ raise ValueError(\"top_p_bound must be less than or equal to 1.0\")\n+\n+ add_BOS = kwargs.get(\"add_BOS\", False)\n+ if not (isinstance(add_BOS, bool)):\n+ raise ValueError(\"add_BOS must be a boolean\")\n+\n+ beam_width = num_beams\n+ if beam_width is not None:\n+ if not isinstance(beam_width, int):\n+ raise ValueError(\"beam_width must be an integer\")\n+ if beam_width < 1:\n+ raise ValueError(\"beam_width must be greater than 0\")\n+ if inputs.shape[0] > 1:\n+ return \"When doing beam_search, batch size must be 1\"\n+\n+ tokenizer = get_tokenizer()\n+\n+ stop_token = kwargs.get(\"stop_token\", tokenizer.eod)\n+ if stop_token is not None:\n+ if not isinstance(stop_token, int):\n+ raise ValueError(\"stop_token must be an integer\")\n+\n+ if length_penalty is None:\n+ length_penalty = 1.0\n+\n+ sizes_list = None\n+ prompts_tokens_tensor = None\n+ prompts_length_tensor = None\n+ if torch.distributed.get_rank() == 0:\n+ # Get the prompts length.\n+ if attention_mask is None:\n+ prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0])\n+ else:\n+ prompts_length_tensor = attention_mask.sum(axis=-1).cuda()\n+\n+ if max_new_tokens is None:\n+ max_new_tokens 
= max_length - inputs.shape[1]\n+ if max_new_tokens <= 0:\n+ raise ValueError(\"max_new_tokens must be greater than 0\")\n+\n+ if add_BOS:\n+ max_length = max_new_tokens + inputs.shape[1] + 1\n+ # making sure that `max_length` is a multiple of 4 to leverage fused kernels\n+ max_length = 4 * math.ceil(max_length / 4)\n+ max_new_tokens = max_length - (inputs.shape[1] + 1)\n+ padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])\n+ prompts_tokens_tensor = torch.concat(\n+ [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1\n+ )\n+ else:\n+ # making sure that `max_length` is a multiple of 4 to leverage fused kernels\n+ max_length = max_new_tokens + inputs.shape[1]\n+ max_length = 4 * math.ceil(max_length / 4)\n+ max_new_tokens = max_length - inputs.shape[1]\n+ padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])\n+ prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1)\n+\n+ # We need the sizes of these tensors for the boradcast\n+ sizes_list = [\n+ prompts_tokens_tensor.size(0), # Batch size\n+ prompts_tokens_tensor.size(1),\n+ ] # Sequence lenght\n+\n+ # First, broadcast the sizes.\n+ sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0)\n+\n+ # Now that we have the sizes, we can boradcast the tokens\n+ # and length tensors.\n+ sizes = sizes_tensor.tolist()\n+ context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0)\n+ context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0)\n+\n+ # Run the inference\n+ random_seed = kwargs.get(\"random_seed\", 0)\n+ torch.random.manual_seed(random_seed)\n+ unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module))\n+ if beam_width is not None:\n+ tokens, _ = beam_search_and_return_on_first_stage(\n+ unwrapped_model,\n+ context_tokens_tensor,\n+ context_length_tensor,\n+ beam_width,\n+ stop_token=stop_token,\n+ num_return_gen=1,\n+ length_penalty=length_penalty,\n+ )\n+ else:\n+ tokens, _, _ = generate_tokens_probs_and_return_on_first_stage(\n+ unwrapped_model,\n+ context_tokens_tensor,\n+ context_length_tensor,\n+ return_output_log_probs=False,\n+ top_k=top_k,\n+ top_p=top_p,\n+ top_p_decay=top_p_decay,\n+ top_p_bound=top_p_bound,\n+ temperature=temperature,\n+ use_eod_token_for_early_termination=True,\n+ )\n+ return tokens\n+\n \n # other utilities\n def avg_losses_across_data_parallel_group(losses):\n@@ -1197,3 +1417,25 @@ def avg_losses_across_data_parallel_group(losses):\n \"\"\"\n \n return average_losses_across_data_parallel_group(losses)\n+\n+\n+def gather_across_data_parallel_groups(tensor):\n+ \"\"\"\n+ Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks.\n+\n+ Args:\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n+ The data to gather across data parallel ranks.\n+\n+ \"\"\"\n+\n+ def _gpu_gather_one(tensor):\n+ if tensor.ndim == 0:\n+ tensor = tensor.clone()[None]\n+ output_tensors = [\n+ tensor.clone() for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group()))\n+ ]\n+ torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group())\n+ return torch.cat(output_tensors, dim=0)\n+\n+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)\n", "code_comments": [ { "body": "Here you should lowercase, or use the str2bool conversion, just to make sure to catch variants.", "diff_hunk": "@@ -90,10 +91,11 
@@ def is_bf16_available(ignore_tpu=False):\n \n \n def is_megatron_lm_available():\n- package_exists = importlib.util.find_spec(\"megatron\") is not None\n- if package_exists:\n- megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\n- return compare_versions(megatron_version, \">=\", \"2.2.0\")\n+ if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\":", "from_author": false }, { "body": "Fits on one line.", "diff_hunk": "@@ -1134,10 +1171,18 @@ def forward(self, **batch_data):\n \n loss = torch.tensor(0.0, device=args.local_rank)\n for key in loss_dict:\n- loss += loss_dict[key]\n+ if len(loss_dict[key].shape) == 0:\n+ loss += loss_dict[key]\n+\n+ logits = None\n+ if \"logits\" in loss_dict:\n+ logits = loss_dict[\"logits\"]\n # loss = reduce(loss)\n if self.train_step_handler.model_output_class is not None:\n- return self.train_step_handler.model_output_class(loss=loss)\n+ return self.train_step_handler.model_output_class(\n+ loss=loss,\n+ logits=logits,\n+ )", "from_author": false }, { "body": "Very reluctant to add this here with the same name as in Transformers. This method does not have the same signature/behavior so users will be very confused to get something different. ", "diff_hunk": "@@ -1186,6 +1231,184 @@ def load_checkpoint(self, input_dir):\n if args.fp16 and self.iteration == 0:\n self.optimizer.reload_model_params()\n \n+ def generate(", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -90,10 +91,11 @@ def is_bf16_available(ignore_tpu=False):\n \n \n def is_megatron_lm_available():\n- package_exists = importlib.util.find_spec(\"megatron\") is not None\n- if package_exists:\n- megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\n- return compare_versions(megatron_version, \">=\", \"2.2.0\")\n+ if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\":", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -1134,10 +1171,18 @@ def forward(self, **batch_data):\n \n loss = torch.tensor(0.0, device=args.local_rank)\n for key in loss_dict:\n- loss += loss_dict[key]\n+ if len(loss_dict[key].shape) == 0:\n+ loss += loss_dict[key]\n+\n+ logits = None\n+ if \"logits\" in loss_dict:\n+ logits = loss_dict[\"logits\"]\n # loss = reduce(loss)\n if self.train_step_handler.model_output_class is not None:\n- return self.train_step_handler.model_output_class(loss=loss)\n+ return self.train_step_handler.model_output_class(\n+ loss=loss,\n+ logits=logits,\n+ )", "from_author": true }, { "body": "renamed to `megatron_generate`", "diff_hunk": "@@ -1186,6 +1231,184 @@ def load_checkpoint(self, input_dir):\n if args.fp16 and self.iteration == 0:\n self.optimizer.reload_model_params()\n \n+ def generate(", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/819", "pr_id": 1109839693 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex f7424d656..9643d3b4f 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -177,6 +177,37 @@ The following arguments are only useful when training in SageMaker\n * `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\n * `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job\n \n+## 
accelerate tpu-config\n+\n+`accelerate tpu-config`\n+\n+**Usage**:\n+\n+```bash\n+accelerate tpu-config [arguments]\n+```\n+\n+**Optional Arguments**:\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\n+\n+**Config Arguments**:\n+\n+Arguments that can be configured through `accelerate config`.\n+\n+* `--config_file` (`str`) -- Path to the config file to use for accelerate.\n+* `--tpu_name` (`str`) -- The name of the TPU to use. If not specified, will use the TPU specified in the config file.\n+* `--tpu_zone` (`str`) -- The zone of the TPU to use. If not specified, will use the zone specified in the config file.\n+\n+**TPU Arguments**:\n+\n+Arguments for options ran inside the TPU.\n+\n+* `--command_file` (`str`) -- The path to the file containing the commands to run on the pod on startup.\n+* `--command` (`str`) -- A command to run on the pod. Can be passed multiple times.\n+* `--install_accelerate` (`bool`) -- Whether to install accelerate on the pod. Defaults to False.\n+* `--accelerate_version` (`str`) -- The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.\n+* `--debug` (`bool`) -- If set, will print the command that would be run instead of running it.\n+\n ## accelerate test\n \n `accelerate test` or `accelerate-test`\ndiff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\nindex 515a66d1a..f0e76fd2c 100644\n--- a/src/accelerate/commands/accelerate_cli.py\n+++ b/src/accelerate/commands/accelerate_cli.py\n@@ -19,8 +19,8 @@\n from accelerate.commands.config import config_command_parser\n from accelerate.commands.env import env_command_parser\n from accelerate.commands.launch import launch_command_parser\n-from accelerate.commands.pod import pod_command_parser\n from accelerate.commands.test import test_command_parser\n+from accelerate.commands.tpu import tpu_command_parser\n \n \n def main():\n@@ -31,7 +31,7 @@ def main():\n config_command_parser(subparsers=subparsers)\n env_command_parser(subparsers=subparsers)\n launch_command_parser(subparsers=subparsers)\n- pod_command_parser(subparsers=subparsers)\n+ tpu_command_parser(subparsers=subparsers)\n test_command_parser(subparsers=subparsers)\n \n # Let's go\ndiff --git a/src/accelerate/commands/pod.py b/src/accelerate/commands/tpu.py\nsimilarity index 81%\nrename from src/accelerate/commands/pod.py\nrename to src/accelerate/commands/tpu.py\nindex 87cd4f904..59bbb08e9 100644\n--- a/src/accelerate/commands/pod.py\n+++ b/src/accelerate/commands/tpu.py\n@@ -22,70 +22,66 @@\n from packaging.version import Version, parse\n \n \n-_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. 
Will also install Accelerate on the pod.\"\n+_description = \"Run commands across TPU VMs for initial setup before running `accelerate launch`.\"\n \n \n-def pod_command_parser(subparsers=None):\n+def tpu_command_parser(subparsers=None):\n if subparsers is not None:\n- parser = subparsers.add_parser(\"pod-config\", description=_description)\n+ parser = subparsers.add_parser(\"tpu-config\", description=_description)\n else:\n- parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\n-\n- parser.add_argument(\n+ parser = argparse.ArgumentParser(\"Accelerate tpu-config command\", description=_description)\n+ # Core arguments\n+ config_args = parser.add_argument_group(\n+ \"Config Arguments\", \"Arguments that can be configured through `accelerate config`.\"\n+ )\n+ config_args.add_argument(\n \"--config_file\",\n type=str,\n default=None,\n help=\"Path to the config file to use for accelerate.\",\n )\n-\n- parser.add_argument(\n- \"--pod_config_file\",\n- type=str,\n+ config_args.add_argument(\n+ \"--tpu_name\",\n default=None,\n- help=\"Path to the config file to use for the pod.\",\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\n )\n-\n- parser.add_argument(\n+ config_args.add_argument(\n+ \"--tpu_zone\",\n+ default=None,\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\n+ )\n+ pod_args = parser.add_argument_group(\"TPU Arguments\", \"Arguments for options ran inside the TPU.\")\n+ pod_args.add_argument(\n \"--command_file\",\n default=None,\n help=\"The path to the file containing the commands to run on the pod on startup.\",\n )\n- parser.add_argument(\n+ pod_args.add_argument(\n \"--command\",\n action=\"append\",\n nargs=\"+\",\n- help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\n- )\n- parser.add_argument(\n- \"--tpu_name\",\n- default=None,\n- help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\n- )\n- parser.add_argument(\n- \"--tpu_zone\",\n- default=None,\n- help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\n+ help=\"A command to run on the pod. Can be passed multiple times.\",\n )\n- parser.add_argument(\n+ pod_args.add_argument(\n \"--install_accelerate\",\n action=\"store_true\",\n help=\"Whether to install accelerate on the pod. Defaults to False.\",\n )\n- parser.add_argument(\n+ pod_args.add_argument(\n \"--accelerate_version\",\n default=\"latest\",\n help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. 
Specify 'dev' to install from GitHub.\",\n )\n- parser.add_argument(\n+ pod_args.add_argument(\n \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\n )\n \n if subparsers is not None:\n- parser.set_defaults(func=pod_launcher)\n+ parser.set_defaults(func=tpu_command_launcher)\n return parser\n \n \n-def pod_launcher(args):\n+def tpu_command_launcher(args):\n defaults = None\n \n # Get the default from the config file if it exists.\n@@ -146,7 +142,7 @@ def pod_launcher(args):\n \n \n def main():\n- parser = pod_command_parser()\n+ parser = tpu_command_parser()\n args = parser.parse_args()\n \n- pod_launcher(args)\n+ tpu_command_launcher(args)\ndiff --git a/tests/test_cli.py b/tests/test_cli.py\nindex 2479f736d..50ed87d9f 100644\n--- a/tests/test_cli.py\n+++ b/tests/test_cli.py\n@@ -66,15 +66,15 @@ def test_config_compatibility(self):\n )\n \n \n-class PodConfigTester(unittest.TestCase):\n+class TpuConfigTester(unittest.TestCase):\n \"\"\"\n- Test case for verifying the `accelerate pod-config` CLI passes the right `gcloud` command.\n+ Test case for verifying the `accelerate tpu-config` CLI passes the right `gcloud` command.\n \"\"\"\n \n tpu_name = \"test-tpu\"\n tpu_zone = \"us-central1-a\"\n command = \"ls\"\n- cmd = [\"accelerate\", \"pod-config\"]\n+ cmd = [\"accelerate\", \"tpu-config\"]\n base_output = \"cd /usr/share\"\n command_file = \"tests/test_samples/test_command_file.sh\"\n gcloud = \"Running gcloud compute tpus tpu-vm ssh\"\n", "code_comments": [ { "body": "This arg was never used in the end, but not removed. ", "diff_hunk": "@@ -22,70 +22,66 @@\n from packaging.version import Version, parse\n \n \n-_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. 
Will also install Accelerate on the pod.\"\n+_description = \"Run commands across TPU VMs for initial setup before running `accelerate launch`.\"\n \n \n-def pod_command_parser(subparsers=None):\n+def tpu_command_parser(subparsers=None):\n if subparsers is not None:\n- parser = subparsers.add_parser(\"pod-config\", description=_description)\n+ parser = subparsers.add_parser(\"tpu-config\", description=_description)\n else:\n- parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\n-\n- parser.add_argument(\n+ parser = argparse.ArgumentParser(\"Accelerate tpu-config command\", description=_description)\n+ # Core arguments\n+ config_args = parser.add_argument_group(\n+ \"Config Arguments\", \"Arguments that can be configured through `accelerate config`.\"\n+ )\n+ config_args.add_argument(\n \"--config_file\",\n type=str,\n default=None,\n help=\"Path to the config file to use for accelerate.\",\n )\n-\n- parser.add_argument(\n- \"--pod_config_file\",\n- type=str,", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/818", "pr_id": 1109536915 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex 52e8ecfb6..f7424d656 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -78,63 +78,102 @@ accelerate launch [arguments] {training_script} --{training_script-argument-1} -\n \n * `-h`, `--help` (`bool`) -- Show a help message and exit\n * `--config_file CONFIG_FILE` (`str`)-- The config file to use for the default values in the launching script.\n-* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.\n-* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on\n- Nvidia Ampere GPUs and PyTorch 1.10 or later.\n-* `--multi_gpu` (`bool`, defaults to `False`) -- Whether or not this should launch a distributed GPU training.\n * `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.\n * `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.\n+* `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails.\n \n The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their \n values. 
They can also be passed in manually.\n \n-**Machine Configuration Arguments**:\n+**Hardware Selection Arguments**:\n \n-The following arguments are useful for customization of worker machines\n-* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.\n-* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\n+* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.\n+* `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training.\n+* `--mps` (`bool`) -- Whether or not this should use MPS-enabled GPU device on MacOS machines.\n+* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.\n+\n+**Resource Selection Arguments**:\n+\n+The following arguments are useful for fine-tuning how available hardware should be used\n+\n+* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\n * `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.\n+* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\n+* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.\n+\n+**Training Paradigm Arguments**:\n+\n+The following arguments are useful for selecting which training paradigm to use.\n+\n+* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.\n+* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.\n+* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.\n+\n+**Distributed GPU Arguments**:\n+\n+The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`: \n+\n * `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list\n * `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.\n+* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.\n * `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.\n * `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.\n * `--rdzv_conf` (`str`) -- Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).\n-* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. 
Can be tuned for optimal performance.\n * `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.\n * `--monitor_interval` (`float`) -- Interval, in seconds, to monitor the state of workers.\n \n+**TPU Arguments**:\n+\n+The following arguments are only useful when `tpu` is passed or TPU training is configured through `accelerate config`: \n+\n+* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.\n+* `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\n+\n **DeepSpeed Arguments**:\n \n-The following arguments are only useful when `use_deepspeed` is passed: \n-* `--use_deepspeed` (`bool`) -- Whether to use deepspeed.\n-* `--deepspeed_config_file DEEPSPEED_CONFIG_FILE` (`str`) -- DeepSpeed config file.\n-* `--zero_stage ZERO_STAGE` (`str`) -- DeepSpeed's ZeRO optimization stage\n-* `--offload_optimizer_device OFFLOAD_OPTIMIZER_DEVICE` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states\n-* `--offload_param_device OFFLOAD_PARAM_DEVICE` (`str`) -- Decides where (none|cpu|nvme) to offload parameters\n-* `--gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS` (`int`) -- Number of gradient_accumulation_steps used in your training script\n-* `--gradient_clipping GRADIENT_CLIPPING` (`float`) -- gradient clipping value used in your training script\n-The following arguments are related to using ZeRO Stage-3\n-* `--zero3_init_flag ZERO3_INIT_FLAG` (`bool`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models\n-* `--zero3_save_16bit_model ZERO3_SAVE_16BIT_MODEL` (`bool`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3\n+The following arguments are only useful when `use_deepspeed` is passed or `deepspeed` is configured through `accelerate config`: \n+\n+* `--deepspeed_config_file` (`str`) -- DeepSpeed config file.\n+* `--zero_stage` (`int`) -- DeepSpeed's ZeRO optimization stage.\n+* `--offload_optimizer_device` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states.\n+* `--offload_param_device` (`str`) -- Decides where (none|cpu|nvme) to offload parameters.\n+* `--gradient_accumulation_steps` (`int`) -- No of gradient_accumulation_steps used in your training script.\n+* `--gradient_clipping` (`float`) -- Gradient clipping value used in your training script.\n+* `--zero3_init_flag` (`str`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.\n+* `--zero3_save_16bit_model` (`str`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
Only applicable with DeepSpeed ZeRO Stage-3.\n+* `--deepspeed_hostfile` (`str`) -- DeepSpeed hostfile for configuring multi-node compute resources.\n+* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using mutli-node setup.\n+* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using mutli-node setup.\n+* `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use.\n \n **Fully Sharded Data Parallelism Arguments**:\n \n-The following arguments are only useful when `use_fdsp` is passed:\n-* `--use_fsdp` (`bool`) -- Whether to use fsdp.\n-* `--offload_params OFFLOAD_PARAMS` (`bool`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.\n-* `--min_num_params MIN_NUM_PARAMS` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.\n-* `--sharding_strategy SHARDING_STRATEGY` (`str`) -- FSDP's Sharding Strategy.\n+The following arguments are only useful when `use_fdsp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:\n \n-**TPU Arguments**:\n+* `--fsdp_offload_params` (`str`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.\n+* `--fsdp_min_num_params` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.\n+* `--fsdp_sharding_strategy` (`int`) -- FSDP's Sharding Strategy.\n+* `--fsdp_auto_wrap_policy` (`str`) -- FSDP's auto wrap policy.\n+* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...\n+* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy.\n+* `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type.\n \n-The following arguments are only useful when `tpu` is passed:\n-* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.\n-* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.\n-* `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\n+**Megatron-LM Arguments**:\n+\n+The following arguments are only useful when `use_megatron_lm` is passed or Megatron-LM is configured through `accelerate config`:\n+\n+* `--megatron_lm_tp_degree` (``) -- Megatron-LM's Tensor Parallelism (TP) degree.\n+* `--megatron_lm_pp_degree` (``) -- Megatron-LM's Pipeline Parallelism (PP) degree.\n+* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1.\n+* `--megatron_lm_sequence_parallelism` (``) -- Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1.\n+* `--megatron_lm_recompute_activations` (``) -- Decides Whether (true|false) to enable Selective Activation Recomputation.\n+* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Pralellel (DP) ranks.\n+* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable).\n \n **AWS SageMaker Arguments**:\n \n The following arguments are only useful when training in SageMaker\n+\n * `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\n * `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon 
SageMaker training job\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 415b06906..46d95c713 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -64,6 +64,7 @@\n options_to_group = {\n \"--multi-gpu\": \"Distributed GPUs\",\n \"--tpu\": \"TPU\",\n+ \"--mps\": \"MPS\",\n \"--use_mps_device\": \"MPS\",\n \"--use_deepspeed\": \"DeepSpeed Arguments\",\n \"--use_fsdp\": \"FSDP Arguments\",\n@@ -144,6 +145,12 @@ def launch_command_parser(subparsers=None):\n hardware_args.add_argument(\n \"--cpu\", default=False, action=\"store_true\", help=\"Whether or not to force the training on the CPU.\"\n )\n+ hardware_args.add_argument(\n+ \"--mps\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\n+ )\n hardware_args.add_argument(\n \"--multi_gpu\",\n default=False,\n@@ -157,7 +164,7 @@ def launch_command_parser(subparsers=None):\n \"--use_mps_device\",\n default=False,\n action=\"store_true\",\n- help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\n+ help=\"This argument is deprecated, use `--mps` instead.\",\n )\n \n # Resource selection arguments\n@@ -506,8 +513,14 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\n- current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\n if args.use_mps_device:\n+ warnings.warn(\n+ '`use_mps_device` flag is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use \"--mps\" instead.',\n+ FutureWarning,\n+ )\n+ args.mps = True\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.mps)\n+ if args.mps:\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n elif args.gpu_ids != \"all\" and args.gpu_ids is not None:\n current_env[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\n@@ -704,7 +717,10 @@ def deepspeed_launcher(args):\n )\n \n if args.fp16:\n- warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', FutureWarning)\n+ warnings.warn(\n+ '--fp16 flag is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. 
Use \"--mixed_precision fp16\" instead.',\n+ FutureWarning,\n+ )\n mixed_precision = \"fp16\"\n \n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\n@@ -955,18 +971,18 @@ def launch_command(args):\n if (\n not args.multi_gpu\n and not args.tpu\n+ and not args.mps\n and not args.use_deepspeed\n and not args.use_fsdp\n- and not args.use_mps_device\n and not args.use_megatron_lm\n ):\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n- args.use_mps_device = defaults.distributed_type == DistributedType.MPS\n+ args.mps = defaults.distributed_type == DistributedType.MPS\n args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM\n- if not args.use_mps_device:\n+ if not args.mps:\n if args.gpu_ids is None:\n if defaults.gpu_ids is not None:\n args.gpu_ids = defaults.gpu_ids\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/814", "pr_id": 1108444656 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 287e6899a..63ea4a71b 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -213,7 +213,10 @@ def __init__(\n )\n \n if fp16:\n- warnings.warn('fp16=True is deprecated. Use mixed_precision=\"fp16\" instead.', DeprecationWarning)\n+ warnings.warn(\n+ \"`fp16=True` is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use `mixed_precision='fp16'` instead.\",\n+ FutureWarning,\n+ )\n mixed_precision = \"fp16\"\n \n if deepspeed_plugin is None: # init from env variables\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex df4f703c3..415b06906 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -173,7 +173,10 @@ def launch_command_parser(subparsers=None):\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n )\n resource_args.add_argument(\n- \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\n+ \"--fp16\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"This argument is deprecated, use `--mixed_precision fp16` instead.\",\n )\n resource_args.add_argument(\n \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\n@@ -403,30 +406,6 @@ def launch_command_parser(subparsers=None):\n type=str,\n help=\"FSDP's state dict type. (useful only when `use_fsdp` flag is passed).\",\n )\n- fsdp_args.add_argument(\n- \"--offload_params\",\n- default=None,\n- type=str,\n- help=\"This argument is deprecated. Use `fsdp_offload_params` instead.\",\n- )\n- fsdp_args.add_argument(\n- \"--min_num_params\",\n- type=int,\n- default=None,\n- help=\"This argument is deprecated. Use `fsdp_min_num_params` instead.\",\n- )\n- fsdp_args.add_argument(\n- \"--sharding_strategy\",\n- type=int,\n- default=None,\n- help=\"This argument is deprecated. Use `fsdp_sharding_strategy` instead.\",\n- )\n- fsdp_args.add_argument(\n- \"--transformer_layer_cls_to_wrap\",\n- default=None,\n- type=str,\n- help=\"This argument is deprecated. 
Use `fsdp_transformer_layer_cls_to_wrap` instead.\",\n- )\n \n # megatron_lm args\n megatron_lm_args = parser.add_argument_group(\"Megatron-LM Arguments\", \"Arguments related to Megatron-LM.\")\n@@ -547,7 +526,10 @@ def simple_launcher(args):\n )\n \n if args.fp16:\n- warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ warnings.warn(\n+ \"`fp16` is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use `mixed_precision fp16` instead.\",\n+ FutureWarning,\n+ )\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n@@ -596,43 +578,14 @@ def multi_gpu_launcher(args):\n raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.\")\n \n if args.fp16:\n- warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ warnings.warn(\n+ \"`fp16` is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use `mixed_precision fp16` instead.\",\n+ FutureWarning,\n+ )\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n if args.use_fsdp:\n- if args.sharding_strategy is not None:\n- warnings.warn(\n- \"`sharding_strategy` is deprecated and will be removed in version 0.13.0 of πŸ€— Accelerate. Use\"\n- \" `fsdp_sharding_strategy` instead\",\n- FutureWarning,\n- )\n- args.fsdp_sharding_strategy = args.sharding_strategy\n-\n- if args.offload_params is not None:\n- warnings.warn(\n- \"`offload_params` is deprecated and will be removed in version 0.13.0 of πŸ€— Accelerate. Use\"\n- \" `fsdp_offload_params` instead\",\n- FutureWarning,\n- )\n- args.fsdp_offload_params = args.offload_params\n-\n- if args.min_num_params is not None:\n- warnings.warn(\n- \"`min_num_params` is deprecated and will be removed in version 0.13.0 of πŸ€— Accelerate. Use\"\n- \" `fsdp_min_num_params` instead\",\n- FutureWarning,\n- )\n- args.fsdp_min_num_params = args.min_num_params\n-\n- if args.transformer_layer_cls_to_wrap is not None:\n- warnings.warn(\n- \"`transformer_layer_cls_to_wrap` is deprecated and will be removed in version 0.13.0 of πŸ€— Accelerate. Use\"\n- \" `fsdp_transformer_layer_cls_to_wrap` instead\",\n- FutureWarning,\n- )\n- args.fsdp_transformer_layer_cls_to_wrap = args.transformer_layer_cls_to_wrap\n-\n current_env[\"USE_FSDP\"] = \"true\"\n current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.fsdp_sharding_strategy)\n current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.fsdp_offload_params).lower()\n@@ -751,7 +704,7 @@ def deepspeed_launcher(args):\n )\n \n if args.fp16:\n- warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', FutureWarning)\n mixed_precision = \"fp16\"\n \n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\n@@ -921,7 +874,7 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n )\n \n if args.fp16:\n- warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ warnings.warn('--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.', FutureWarning)\n mixed_precision = \"fp16\"\n \n # Environment variables to be set for use during training job\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex ea8fffa51..4de8b5858 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -105,7 +105,10 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n )\n \n if use_fp16:\n- warnings.warn('use_fp16=True is deprecated. Use mixed_precision=\"fp16\" instead.', DeprecationWarning)\n+ warnings.warn(\n+ \"`fp16=True` is deprecated and will be removed in version 0.15.0 of πŸ€— Accelerate. Use `mixed_precision='fp16'` instead.\",\n+ FutureWarning,\n+ )\n mixed_precision = \"fp16\"\n \n # torch.distributed will expect a few environment variable to be here. We set the ones common to each\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/813", "pr_id": 1108328128 }, { "diff": "diff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\nindex 120bb7f42..ef429a369 100644\n--- a/src/accelerate/hooks.py\n+++ b/src/accelerate/hooks.py\n@@ -160,17 +160,19 @@ def new_forward(*args, **kwargs):\n return module\n \n \n-def remove_hook_from_module(module: nn.Module):\n+def remove_hook_from_module(module: nn.Module, recurse=False):\n \"\"\"\n Removes any hook attached to a module via `add_hook_to_module`.\n \n Args:\n module (`torch.nn.Module`): The module to attach a hook to.\n+ recurse (`bool`, **optional**): Whether to remove the hooks recursively\n \n Returns:\n `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can\n be discarded).\n \"\"\"\n+\n if hasattr(module, \"_hf_hook\"):\n module._hf_hook.detach_hook(module)\n delattr(module, \"_hf_hook\")\n@@ -179,6 +181,10 @@ def remove_hook_from_module(module: nn.Module):\n module.forward = module._old_forward\n delattr(module, \"_old_forward\")\n \n+ if recurse:\n+ for child in module.children():\n+ remove_hook_from_module(child, recurse)\n+\n return module\n \n \n", "code_comments": [ { "body": "Not sure the check is useful (should be >0 but the loop will just be empty)\r\n```suggestion\r\n if recurse:\r\n```", "diff_hunk": "@@ -179,6 +181,10 @@ def remove_hook_from_module(module: nn.Module):\n module.forward = module._old_forward\n delattr(module, \"_old_forward\")\n \n+ if recurse and len(list(module.children())) > 1:", "from_author": false }, { "body": "```suggestion\r\n for child in module.children():\r\n```\r\nNo need to ask for the names if you don't want them ;-)", "diff_hunk": "@@ -179,6 +181,10 @@ def remove_hook_from_module(module: nn.Module):\n module.forward = module._old_forward\n delattr(module, \"_old_forward\")\n \n+ if recurse and len(list(module.children())) > 1:\n+ for _, child in module.named_children():", "from_author": false }, { "body": "Nice catch! Yes indeed ;) ", "diff_hunk": "@@ -179,6 +181,10 @@ def remove_hook_from_module(module: nn.Module):\n module.forward = module._old_forward\n delattr(module, \"_old_forward\")\n \n+ if recurse and len(list(module.children())) > 1:", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks for the suggestion! 
Can confirm the `BetterTransformers` tests pass on my side ;) ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/812", "pr_id": 1108296843 }, { "diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 4fad12c72..d5eeef99a 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -157,3 +157,9 @@ def is_overflow(self):\n def step_was_skipped(self):\n \"\"\"Whether or not the optimizer step was skipped.\"\"\"\n return self._is_overflow\n+\n+ def __getstate__(self):\n+ return self.__dict__.copy()\n+\n+ def __setstate__(self, state):\n+ self.__dict__.update(state)\ndiff --git a/tests/test_optimizer.py b/tests/test_optimizer.py\nnew file mode 100644\nindex 000000000..15a095bf7\n--- /dev/null\n+++ b/tests/test_optimizer.py\n@@ -0,0 +1,36 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import pickle\n+import unittest\n+\n+import torch\n+\n+from accelerate import Accelerator\n+from accelerate.state import AcceleratorState\n+from accelerate.test_utils import require_cpu\n+\n+\n+@require_cpu\n+class OptimizerTester(unittest.TestCase):\n+ def test_accelerated_optimizer_pickling(self):\n+ model = torch.nn.Linear(10, 10)\n+ optimizer = torch.optim.SGD(model.parameters(), 0.1)\n+ accelerator = Accelerator()\n+ optimizer = accelerator.prepare(optimizer)\n+ try:\n+ pickle.loads(pickle.dumps(optimizer))\n+ except Exception as e:\n+ self.fail(f\"Accelerated optimizer pickling failed with {e}\")\n+ AcceleratorState._reset_state()\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Perfect, thanks!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/811", "pr_id": 1108011205 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 7c8534e93..df4f703c3 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -61,180 +61,367 @@\n \n logger = logging.getLogger(__name__)\n \n+options_to_group = {\n+ \"--multi-gpu\": \"Distributed GPUs\",\n+ \"--tpu\": \"TPU\",\n+ \"--use_mps_device\": \"MPS\",\n+ \"--use_deepspeed\": \"DeepSpeed Arguments\",\n+ \"--use_fsdp\": \"FSDP Arguments\",\n+ \"--use_megatron_lm\": \"Megatron-LM Arguments\",\n+}\n+\n+\n+def clean_option(option):\n+ \"Finds all cases of - after the first two characters and changes them to _\"\n+ if option.startswith(\"--\"):\n+ return option[:3] + option[3:].replace(\"-\", \"_\")\n+\n+\n+class _CustomHelpAction(argparse._HelpAction):\n+ \"\"\"\n+ This is a custom help action that will hide all arguments that are not used in the command line when the help is\n+ called. 
This is useful for the case where the user is using a specific platform and only wants to see the arguments\n+ for that platform.\n+ \"\"\"\n+\n+ def __call__(self, parser, namespace, values, option_string=None):\n+ if \"accelerate\" in sys.argv[0] and \"launch\" in sys.argv[1:]:\n+ args = sys.argv[2:]\n+ else:\n+ args = sys.argv[1:]\n+ opts = parser._actions\n+ titles = [\n+ \"Hardware Selection Arguments\",\n+ \"Resource Selection Arguments\",\n+ \"Training Paradigm Arguments\",\n+ \"positional arguments\",\n+ \"optional arguments\",\n+ ]\n+ if len(args) > 1:\n+ used_platforms = [arg for arg in args if arg in options_to_group.keys()]\n+ args = list(map(clean_option, args))\n+ used_titles = [options_to_group[o] for o in used_platforms]\n+ for i, arg in enumerate(opts):\n+ # If the argument's container is outside of the used titles, hide it\n+ if arg.container.title not in titles + used_titles:\n+ setattr(opts[i], \"help\", argparse.SUPPRESS)\n+ # If the argument is hardware selection, but not being passed, hide it\n+ elif arg.container.title == \"Hardware Selection Arguments\":\n+ if set(arg.option_strings).isdisjoint(set(args)):\n+ setattr(opts[i], \"help\", argparse.SUPPRESS)\n+ else:\n+ setattr(opts[i], \"help\", arg.help + \" (currently selected)\")\n+ # If the argument is a training paradigm, but not being passed, hide it\n+ elif arg.container.title == \"Training Paradigm Arguments\":\n+ if set(arg.option_strings).isdisjoint(set(used_platforms)):\n+ setattr(opts[i], \"help\", argparse.SUPPRESS)\n+ else:\n+ setattr(opts[i], \"help\", arg.help + \" (currently selected)\")\n+ for i, group in enumerate(list(parser._action_groups)):\n+ # If all arguments in the group are hidden, hide the group\n+ if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):\n+ parser._action_groups.remove(group)\n+\n+ super().__call__(parser, namespace, values, option_string)\n+\n \n def launch_command_parser(subparsers=None):\n if subparsers is not None:\n- parser = subparsers.add_parser(\"launch\")\n+ parser = subparsers.add_parser(\"launch\", add_help=False)\n else:\n- parser = argparse.ArgumentParser(\"Accelerate launch command\")\n+ parser = argparse.ArgumentParser(\"Accelerate launch command\", add_help=False)\n+\n+ parser.register(\"action\", \"help\", _CustomHelpAction)\n+ parser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Show this help message and exit.\")\n \n parser.add_argument(\n \"--config_file\", default=None, help=\"The config file to use for the default values in the launching script.\"\n )\n- parser.add_argument(\n+ # Hardware selection arguments\n+ hardware_args = parser.add_argument_group(\n+ \"Hardware Selection Arguments\", \"Arguments for selecting the hardware to be used.\"\n+ )\n+ hardware_args.add_argument(\n+ \"--cpu\", default=False, action=\"store_true\", help=\"Whether or not to force the training on the CPU.\"\n+ )\n+ hardware_args.add_argument(\n \"--multi_gpu\",\n default=False,\n action=\"store_true\",\n help=\"Whether or not this should launch a distributed GPU training.\",\n )\n- parser.add_argument(\n+ hardware_args.add_argument(\n+ \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n+ )\n+ hardware_args.add_argument(\n \"--use_mps_device\",\n default=False,\n action=\"store_true\",\n help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\n )\n \n- # deepspeed args\n- parser.add_argument(\n+ # Resource selection arguments\n+ resource_args = 
parser.add_argument_group(\n+ \"Resource Selection Arguments\", \"Arguments for fine-tuning how available hardware should be used.\"\n+ )\n+ resource_args.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether or not to use mixed precision training. \"\n+ \"Choose between FP16 and BF16 (bfloat16) training. \"\n+ \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n+ )\n+ resource_args.add_argument(\n+ \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\n+ )\n+ resource_args.add_argument(\n+ \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\n+ )\n+ resource_args.add_argument(\n+ \"--num_machines\", type=int, default=None, help=\"The total number of machines used in this training.\"\n+ )\n+ resource_args.add_argument(\n+ \"--num_cpu_threads_per_process\",\n+ type=int,\n+ default=None,\n+ help=\"The number of CPU threads per process. Can be tuned for optimal performance.\",\n+ )\n+\n+ # Training Paradigm arguments\n+ paradigm_args = parser.add_argument_group(\n+ \"Training Paradigm Arguments\", \"Arguments for selecting which training paradigm to be used.\"\n+ )\n+ paradigm_args.add_argument(\n \"--use_deepspeed\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use deepspeed.\",\n )\n+ paradigm_args.add_argument(\n+ \"--use_fsdp\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether to use fsdp.\",\n+ )\n+ paradigm_args.add_argument(\n+ \"--use_megatron_lm\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether to use Megatron-LM.\",\n+ )\n+\n+ # distributed GPU training arguments\n+ distributed_args = parser.add_argument_group(\"Distributed GPUs\", \"Arguments related to distributed GPU training.\")\n+ distributed_args.add_argument(\n+ \"--gpu_ids\",\n+ default=None,\n+ help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\n+ )\n+ distributed_args.add_argument(\n+ \"--same_network\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether all machines used for multinode training exist on the same local network.\",\n+ )\n+ distributed_args.add_argument(\n+ \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\n+ )\n+ distributed_args.add_argument(\n+ \"--main_process_ip\", type=str, default=None, help=\"The IP address of the machine of rank 0.\"\n+ )\n+ distributed_args.add_argument(\n+ \"--main_process_port\",\n+ type=int,\n+ default=None,\n+ help=\"The port to use to communicate with the machine of rank 0.\",\n+ )\n+ # Rendezvous related arguments\n+ distributed_args.add_argument(\n+ \"--rdzv_conf\",\n+ type=str,\n+ default=\"\",\n+ help=\"Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).\",\n+ )\n+ distributed_args.add_argument(\n+ \"--max_restarts\",\n+ type=int,\n+ default=0,\n+ help=\"Maximum number of worker group restarts before failing.\",\n+ )\n+ distributed_args.add_argument(\n+ \"--monitor_interval\",\n+ type=float,\n+ default=5,\n+ help=\"Interval, in seconds, to monitor the state of workers.\",\n+ )\n+ parser.add_argument(\n+ \"-m\",\n+ \"--module\",\n+ action=\"store_true\",\n+ help=\"Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.\",\n+ )\n parser.add_argument(\n+ \"--no_python\",\n+ action=\"store_true\",\n+ help=\"Skip prepending the 
training script with 'python' - just execute it directly. Useful when the script is not a Python script.\",\n+ )\n+\n+ # tpu arguments\n+ tpu_args = parser.add_argument_group(\"TPU\", \"Arguments related to TPU.\")\n+ tpu_args.add_argument(\n+ \"--main_training_function\",\n+ type=str,\n+ default=None,\n+ help=\"The name of the main function to be executed in your script (only for TPU training).\",\n+ )\n+ tpu_args.add_argument(\n+ \"--downcast_bf16\",\n+ action=\"store_true\",\n+ help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\",\n+ )\n+\n+ # DeepSpeed arguments\n+ deepspeed_args = parser.add_argument_group(\"DeepSpeed Arguments\", \"Arguments related to DeepSpeed.\")\n+ deepspeed_args.add_argument(\n \"--deepspeed_config_file\",\n default=None,\n type=str,\n help=\"DeepSpeed config file.\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--zero_stage\",\n default=None,\n type=int,\n help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--offload_param_device\",\n default=None,\n type=str,\n help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed).\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--gradient_accumulation_steps\",\n default=None,\n type=int,\n help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--gradient_clipping\",\n default=None,\n type=float,\n help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed).\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--zero3_init_flag\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--zero3_save_16bit_model\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
\"\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--deepspeed_hostfile\",\n default=None,\n type=str,\n help=\"DeepSpeed hostfile for configuring multi-node compute resources.\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--deepspeed_exclusion_filter\",\n default=None,\n type=str,\n help=\"DeepSpeed exclusion filter string when using mutli-node setup.\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--deepspeed_inclusion_filter\",\n default=None,\n type=str,\n help=\"DeepSpeed inclusion filter string when using mutli-node setup.\",\n )\n- parser.add_argument(\n+ deepspeed_args.add_argument(\n \"--deepspeed_multinode_launcher\",\n default=None,\n type=str,\n help=\"DeepSpeed multi-node launcher to use.\",\n )\n \n- # fsdp args\n- parser.add_argument(\n- \"--use_fsdp\",\n- default=False,\n- action=\"store_true\",\n- help=\"Whether to use fsdp.\",\n- )\n- parser.add_argument(\n+ # fsdp arguments\n+ fsdp_args = parser.add_argument_group(\"FSDP Arguments\", \"Arguments related to Fully Shared Data Parallelism.\")\n+ fsdp_args.add_argument(\n \"--fsdp_offload_params\",\n default=\"false\",\n type=str,\n help=\"Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--fsdp_min_num_params\",\n type=int,\n default=1e8,\n help=\"FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--fsdp_sharding_strategy\",\n type=int,\n default=1,\n help=\"FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--fsdp_auto_wrap_policy\",\n type=str,\n default=None,\n help=\"FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--fsdp_transformer_layer_cls_to_wrap\",\n default=None,\n type=str,\n help=\"Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... \"\n \"(useful only when `use_fsdp` flag is passed).\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--fsdp_backward_prefetch_policy\",\n default=None,\n type=str,\n help=\"FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--fsdp_state_dict_type\",\n default=None,\n type=str,\n help=\"FSDP's state dict type. (useful only when `use_fsdp` flag is passed).\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--offload_params\",\n default=None,\n type=str,\n help=\"This argument is deprecated. Use `fsdp_offload_params` instead.\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--min_num_params\",\n type=int,\n default=None,\n help=\"This argument is deprecated. Use `fsdp_min_num_params` instead.\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--sharding_strategy\",\n type=int,\n default=None,\n help=\"This argument is deprecated. 
Use `fsdp_sharding_strategy` instead.\",\n )\n- parser.add_argument(\n+ fsdp_args.add_argument(\n \"--transformer_layer_cls_to_wrap\",\n default=None,\n type=str,\n@@ -242,45 +429,40 @@ def launch_command_parser(subparsers=None):\n )\n \n # megatron_lm args\n- parser.add_argument(\n- \"--use_megatron_lm\",\n- default=False,\n- action=\"store_true\",\n- help=\"Whether to use Megatron-LM.\",\n- )\n- parser.add_argument(\n+ megatron_lm_args = parser.add_argument_group(\"Megatron-LM Arguments\", \"Arguments related to Megatron-LM.\")\n+ megatron_lm_args.add_argument(\n \"--megatron_lm_tp_degree\",\n type=int,\n default=1,\n help=\"Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).\",\n )\n- parser.add_argument(\n+ megatron_lm_args.add_argument(\n \"--megatron_lm_pp_degree\",\n type=int,\n default=1,\n help=\"Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).\",\n )\n- parser.add_argument(\n+ megatron_lm_args.add_argument(\n \"--megatron_lm_num_micro_batches\",\n type=int,\n default=None,\n help=\"Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).\",\n )\n- parser.add_argument(\n+ megatron_lm_args.add_argument(\n \"--megatron_lm_sequence_parallelism\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. \"\n \"(useful only when `use_megatron_lm` flag is passed).\",\n )\n- parser.add_argument(\n+ megatron_lm_args.add_argument(\n \"--megatron_lm_recompute_activations\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to enable Selective Activation Recomputation. \"\n \"(useful only when `use_megatron_lm` flag is passed).\",\n )\n- parser.add_argument(\n+ megatron_lm_args.add_argument(\n \"--megatron_lm_use_distributed_optimizer\",\n default=None,\n type=str,\n@@ -288,7 +470,7 @@ def launch_command_parser(subparsers=None):\n \"which shards optimizer state and gradients across Data Pralellel (DP) ranks. \"\n \"(useful only when `use_megatron_lm` flag is passed).\",\n )\n- parser.add_argument(\n+ megatron_lm_args.add_argument(\n \"--megatron_lm_gradient_clipping\",\n default=1.0,\n type=float,\n@@ -296,105 +478,15 @@ def launch_command_parser(subparsers=None):\n \"(useful only when `use_megatron_lm` flag is passed).\",\n )\n \n- parser.add_argument(\n- \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n- )\n- parser.add_argument(\n- \"--mixed_precision\",\n- type=str,\n- choices=[\"no\", \"fp16\", \"bf16\"],\n- help=\"Whether or not to use mixed precision training. \"\n- \"Choose between FP16 and BF16 (bfloat16) training. 
\"\n- \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n- )\n-\n- parser.add_argument(\n- \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\n- )\n- parser.add_argument(\n- \"--cpu\", default=False, action=\"store_true\", help=\"Whether or not to force the training on the CPU.\"\n- )\n- parser.add_argument(\n- \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\n- )\n- parser.add_argument(\n- \"--num_machines\", type=int, default=None, help=\"The total number of machines used in this training.\"\n- )\n- parser.add_argument(\n- \"--gpu_ids\",\n- default=None,\n- help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\n- )\n- parser.add_argument(\n- \"--same_network\",\n- default=False,\n- action=\"store_true\",\n- help=\"Whether all machines used for multinode training exist on the same local network.\",\n- )\n- parser.add_argument(\n- \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\n- )\n- parser.add_argument(\"--main_process_ip\", type=str, default=None, help=\"The IP address of the machine of rank 0.\")\n- parser.add_argument(\n- \"--main_process_port\",\n- type=int,\n- default=None,\n- help=\"The port to use to communicate with the machine of rank 0.\",\n- )\n- # Rendezvous related arguments\n- parser.add_argument(\n- \"--rdzv_conf\",\n- type=str,\n- default=\"\",\n- help=\"Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).\",\n- )\n- parser.add_argument(\n- \"--max_restarts\",\n- type=int,\n- default=0,\n- help=\"Maximum number of worker group restarts before failing.\",\n- )\n- parser.add_argument(\n- \"--monitor_interval\",\n- type=float,\n- default=5,\n- help=\"Interval, in seconds, to monitor the state of workers.\",\n- )\n- parser.add_argument(\n- \"--main_training_function\",\n- type=str,\n- default=None,\n- help=\"The name of the main function to be executed in your script (only for TPU training).\",\n- )\n- parser.add_argument(\n- \"--downcast_bf16\",\n- action=\"store_true\",\n- help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\",\n- )\n- parser.add_argument(\n- \"-m\",\n- \"--module\",\n- action=\"store_true\",\n- help=\"Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.\",\n- )\n- parser.add_argument(\n- \"--no_python\",\n- action=\"store_true\",\n- help=\"Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.\",\n- )\n- parser.add_argument(\n- \"--num_cpu_threads_per_process\",\n- type=int,\n- default=None,\n- help=\"The number of CPU threads per process. Can be tuned for optimal performance.\",\n- )\n- parser.add_argument(\n+ # AWS arguments\n+ aws_args = parser.add_argument_group(\"AWS Arguments\", \"Arguments related to AWS.\")\n+ aws_args.add_argument(\n \"--aws_access_key_id\",\n type=str,\n default=None,\n help=\"The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\",\n )\n- parser.add_argument(\n+ aws_args.add_argument(\n \"--aws_secret_access_key\",\n type=str,\n default=None,\n", "code_comments": [ { "body": "I hate this underscore convention in Python as it hurts readability and nothing is ever truly private. 
In Accelerate, like for Transformers, the convention is that things that are not in the main init are private. So no need to add all those _ :-)", "diff_hunk": "@@ -61,340 +61,430 @@\n \n logger = logging.getLogger(__name__)\n \n+_options_to_group = {\n+ \"--multi-gpu\": \"Distributed GPUs\",\n+ \"--tpu\": \"TPU\",\n+ \"--use_mps_device\": \"MPS\",\n+ \"--use_deepspeed\": \"DeepSpeed\",\n+ \"--use_fsdp\": \"FSDP\",\n+ \"--use_megatron_lm\": \"Megatron-LM\",\n+}\n+\n+\n+def _clean_option(option):", "from_author": false }, { "body": "Made public! :) Will note down for next time", "diff_hunk": "@@ -61,340 +61,430 @@\n \n logger = logging.getLogger(__name__)\n \n+_options_to_group = {\n+ \"--multi-gpu\": \"Distributed GPUs\",\n+ \"--tpu\": \"TPU\",\n+ \"--use_mps_device\": \"MPS\",\n+ \"--use_deepspeed\": \"DeepSpeed\",\n+ \"--use_fsdp\": \"FSDP\",\n+ \"--use_megatron_lm\": \"Megatron-LM\",\n+}\n+\n+\n+def _clean_option(option):", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/810", "pr_id": 1107487241 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 81759962c..287e6899a 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -647,7 +647,15 @@ def _prepare_fsdp(self, *args):\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\n \"a single one due to nested module wrapping and parameter flattening.\"\n )\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ try:\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ except TypeError:\n+ if \"differentiable\" in obj.optimizer.defaults:\n+ # https://github.com/huggingface/accelerate/issues/801\n+ defaults = {k: v for k, v in obj.optimizer.defaults.items() if k != \"differentiable\"}\n+ optimizer = obj.optimizer.__class__(model.parameters(), **defaults)\n+ else:\n+ raise\n obj = self.prepare_optimizer(optimizer)\n optimizers.append(obj)\n elif isinstance(obj, torch.nn.Module):\n", "code_comments": [ { "body": "This will delete inplace, we should probably create a new dict here", "diff_hunk": "@@ -647,7 +647,15 @@ def _prepare_fsdp(self, *args):\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\n \"a single one due to nested module wrapping and parameter flattening.\"\n )\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ try:\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ except TypeError:\n+ if \"differentiable\" in obj.optimizer.defaults:\n+ # https://github.com/huggingface/accelerate/issues/801\n+ del obj.optimizer.defaults[\"differentiable\"]", "from_author": false }, { "body": "```suggestion\r\n defaults = obj.optimizer.defaults.copy()\r\n del defaults[\"differentiable\"]\r\n optimizer = obj.optimizer.__class__(model.parameters(), **defaults)\r\n```", "diff_hunk": "@@ -647,7 +647,15 @@ def _prepare_fsdp(self, *args):\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\n \"a single one due to nested module wrapping and parameter flattening.\"\n )\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ try:\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ except TypeError:\n+ if 
\"differentiable\" in obj.optimizer.defaults:\n+ # https://github.com/huggingface/accelerate/issues/801\n+ del obj.optimizer.defaults[\"differentiable\"]\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)", "from_author": true }, { "body": "Good point. Done. PTAL.", "diff_hunk": "@@ -647,7 +647,15 @@ def _prepare_fsdp(self, *args):\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\n \"a single one due to nested module wrapping and parameter flattening.\"\n )\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ try:\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ except TypeError:\n+ if \"differentiable\" in obj.optimizer.defaults:\n+ # https://github.com/huggingface/accelerate/issues/801\n+ del obj.optimizer.defaults[\"differentiable\"]", "from_author": true }, { "body": "```suggestion\r\n defaults = {k: v for k, v in obj.optimizer.defaults.items() if k != \"differentiable\"}\r\n```", "diff_hunk": "@@ -647,7 +647,16 @@ def _prepare_fsdp(self, *args):\n \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\n \"a single one due to nested module wrapping and parameter flattening.\"\n )\n- optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ try:\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ except TypeError:\n+ if \"differentiable\" in obj.optimizer.defaults:\n+ # https://github.com/huggingface/accelerate/issues/801\n+ defaults = obj.optimizer.defaults.copy()\n+ del defaults[\"differentiable\"]", "from_author": false } ], "context": [ { "body": "closing #801", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thank you for the quick fix! 
πŸ€—", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/803", "pr_id": 1105761867 }, { "diff": "diff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\nindex 6300c7c1d..515a66d1a 100644\n--- a/src/accelerate/commands/accelerate_cli.py\n+++ b/src/accelerate/commands/accelerate_cli.py\n@@ -19,6 +19,7 @@\n from accelerate.commands.config import config_command_parser\n from accelerate.commands.env import env_command_parser\n from accelerate.commands.launch import launch_command_parser\n+from accelerate.commands.pod import pod_command_parser\n from accelerate.commands.test import test_command_parser\n \n \n@@ -28,9 +29,10 @@ def main():\n \n # Register commands\n config_command_parser(subparsers=subparsers)\n+ env_command_parser(subparsers=subparsers)\n launch_command_parser(subparsers=subparsers)\n+ pod_command_parser(subparsers=subparsers)\n test_command_parser(subparsers=subparsers)\n- env_command_parser(subparsers=subparsers)\n \n # Let's go\n args = parser.parse_args()\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 0fa3ceab2..acc4a1828 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -14,6 +14,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import os\n+\n from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_transformers_available\n from ...utils.constants import (\n DEEPSPEED_MULTINODE_LAUNCHERS,\n@@ -41,6 +43,10 @@ def get_cluster_input():\n main_process_port = None\n rdzv_backend = \"static\"\n same_network = True\n+ tpu_name = None\n+ tpu_zone = None\n+ commands = None\n+ command_file = None\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\n num_machines = _ask_field(\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n@@ -341,6 +347,50 @@ def get_cluster_input():\n \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\n default=\"main\",\n )\n+ use_cluster = _ask_field(\n+ \"Are you using a TPU cluster? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_cluster:\n+ tpu_name = _ask_field(\n+ \"What is the name of your TPU cluster? \",\n+ default=None,\n+ error_message=\"Please enter the name of your TPU cluster.\",\n+ )\n+ tpu_zone = _ask_field(\n+ \"What is the zone of your TPU cluster? \",\n+ default=None,\n+ error_message=\"Please enter the zone of your TPU cluster.\",\n+ )\n+ run_commands = _ask_field(\n+ \"Do you have code you wish to run on startup in each pod? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if run_commands:\n+ use_command_file = _ask_field(\n+ \"Is this code located in a bash script? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_command_file:\n+ command_file = _ask_field(\n+ \"What is the path to your bash script? \",\n+ default=None,\n+ error_message=\"Please enter the path to your bash script.\",\n+ )\n+ command_file = os.path.abspath(command_file)\n+ else:\n+ commands = _ask_field(\n+ \"What commands do you wish to run on startup in each pod? 
\",\n+ default=None,\n+ error_message=\"Please enter the commands you wish to run on startup in each pod as a single string.\",\n+ )\n+\n else:\n main_training_function = \"main\"\n \n@@ -408,4 +458,8 @@ def get_cluster_input():\n use_cpu=use_cpu,\n rdzv_backend=rdzv_backend,\n same_network=same_network,\n+ tpu_name=tpu_name,\n+ tpu_zone=tpu_zone,\n+ commands=commands,\n+ command_file=command_file,\n )\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 61f585858..9a1247c55 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -18,7 +18,7 @@\n import os\n from dataclasses import dataclass\n from enum import Enum\n-from typing import Optional, Union\n+from typing import List, Optional, Union\n \n import yaml\n \n@@ -151,6 +151,12 @@ class ClusterConfig(BaseConfig):\n # args for TPU\n downcast_bf16: bool = False\n \n+ # args for TPU pods\n+ tpu_name: str = None\n+ tpu_zone: str = None\n+ command_file: str = None\n+ command: List[str] = None\n+\n def __post_init__(self):\n if self.deepspeed_config is None:\n self.deepspeed_config = {}\ndiff --git a/src/accelerate/commands/env.py b/src/accelerate/commands/env.py\nindex b66008e1b..a19c04d4a 100644\n--- a/src/accelerate/commands/env.py\n+++ b/src/accelerate/commands/env.py\n@@ -1,3 +1,19 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n import argparse\n import os\n import platform\ndiff --git a/src/accelerate/commands/pod.py b/src/accelerate/commands/pod.py\nnew file mode 100644\nindex 000000000..87cd4f904\n--- /dev/null\n+++ b/src/accelerate/commands/pod.py\n@@ -0,0 +1,152 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import subprocess\n+\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\n+from packaging.version import Version, parse\n+\n+\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. 
Will also install Accelerate on the pod.\"\n+\n+\n+def pod_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for accelerate.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--pod_config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for the pod.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--command_file\",\n+ default=None,\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\n+ )\n+ parser.add_argument(\n+ \"--command\",\n+ action=\"append\",\n+ nargs=\"+\",\n+ help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_name\",\n+ default=None,\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_zone\",\n+ default=None,\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--install_accelerate\",\n+ action=\"store_true\",\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\n+ )\n+ parser.add_argument(\n+ \"--accelerate_version\",\n+ default=\"latest\",\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.\",\n+ )\n+ parser.add_argument(\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=pod_launcher)\n+ return parser\n+\n+\n+def pod_launcher(args):\n+ defaults = None\n+\n+ # Get the default from the config file if it exists.\n+ if args.config_file is not None or os.path.isfile(default_config_file):\n+ defaults = load_config_from_file(args.config_file)\n+ if not args.command_file and defaults.command_file is not None and not args.command:\n+ args.command_file = defaults.command_file\n+ if not args.command and defaults.command is not None:\n+ args.command = defaults.command\n+ if not args.tpu_name:\n+ args.tpu_name = defaults.tpu_name\n+ if not args.tpu_zone:\n+ args.tpu_zone = defaults.tpu_zone\n+ if args.accelerate_version == \"dev\":\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\n+ elif args.accelerate_version == \"latest\":\n+ args.accelerate_version = \"accelerate -U\"\n+ elif isinstance(parse(args.accelerate_version), Version):\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\n+\n+ if not args.command_file and not args.command:\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")\n+\n+ if args.command_file:\n+ with open(args.command_file, \"r\") as f:\n+ args.command = [f.read().splitlines()]\n+\n+ # To turn list of lists into list of strings\n+ args.command = [line for cmd in args.command for line in cmd]\n+ # Default to the shared folder and install accelerate\n+ new_cmd = [\"cd /usr/share\"]\n+ if args.install_accelerate:\n+ new_cmd += [f\"pip install {args.accelerate_version}\"]\n+ new_cmd += args.command\n+ args.command = \"; \".join(new_cmd)\n+\n+ # Then send it to gcloud\n+ # Eventually try to use 
google-api-core to do this instead of subprocess\n+ cmd = [\n+ \"gcloud\",\n+ \"compute\",\n+ \"tpus\",\n+ \"tpu-vm\",\n+ \"ssh\",\n+ args.tpu_name,\n+ \"--zone\",\n+ args.tpu_zone,\n+ \"--command\",\n+ args.command,\n+ \"--worker\",\n+ \"all\",\n+ ]\n+ if args.debug:\n+ print(f\"Running {' '.join(cmd)}\")\n+ return\n+ subprocess.run(cmd)\n+ print(\"Successfully setup pod.\")\n+\n+\n+def main():\n+ parser = pod_command_parser()\n+ args = parser.parse_args()\n+\n+ pod_launcher(args)\ndiff --git a/tests/test_cli.py b/tests/test_cli.py\nindex ceed58290..2479f736d 100644\n--- a/tests/test_cli.py\n+++ b/tests/test_cli.py\n@@ -21,6 +21,7 @@\n \n import accelerate\n from accelerate.test_utils import execute_subprocess_async\n+from accelerate.test_utils.testing import run_command\n \n \n class AccelerateLauncherTester(unittest.TestCase):\n@@ -63,3 +64,151 @@ def test_config_compatibility(self):\n execute_subprocess_async(\n self.base_cmd + [\"--config_file\", str(config), self.test_file_path], env=os.environ.copy()\n )\n+\n+\n+class PodConfigTester(unittest.TestCase):\n+ \"\"\"\n+ Test case for verifying the `accelerate pod-config` CLI passes the right `gcloud` command.\n+ \"\"\"\n+\n+ tpu_name = \"test-tpu\"\n+ tpu_zone = \"us-central1-a\"\n+ command = \"ls\"\n+ cmd = [\"accelerate\", \"pod-config\"]\n+ base_output = \"cd /usr/share\"\n+ command_file = \"tests/test_samples/test_command_file.sh\"\n+ gcloud = \"Running gcloud compute tpus tpu-vm ssh\"\n+\n+ @staticmethod\n+ def clean_output(output):\n+ return \"\".join(output).rstrip()\n+\n+ def test_base(self):\n+ output = run_command(\n+ self.cmd\n+ + [\"--command\", self.command, \"--tpu_zone\", self.tpu_zone, \"--tpu_name\", self.tpu_name, \"--debug\"],\n+ return_stdout=True,\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f\"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all\",\n+ )\n+\n+ def test_base_backward_compatibility(self):\n+ output = run_command(\n+ self.cmd\n+ + [\n+ \"--config_file\",\n+ \"tests/test_configs/0_12_0.yaml\",\n+ \"--command\",\n+ self.command,\n+ \"--tpu_zone\",\n+ self.tpu_zone,\n+ \"--tpu_name\",\n+ self.tpu_name,\n+ \"--debug\",\n+ ],\n+ return_stdout=True,\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f\"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all\",\n+ )\n+\n+ def test_with_config_file(self):\n+ output = run_command(\n+ self.cmd + [\"--config_file\", \"tests/test_configs/latest.yaml\", \"--debug\"], return_stdout=True\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all',\n+ )\n+\n+ def test_with_config_file_and_command(self):\n+ output = run_command(\n+ self.cmd + [\"--config_file\", \"tests/test_configs/latest.yaml\", \"--command\", self.command, \"--debug\"],\n+ return_stdout=True,\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f\"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all\",\n+ )\n+\n+ def test_with_config_file_and_multiple_command(self):\n+ output = run_command(\n+ self.cmd\n+ + [\n+ \"--config_file\",\n+ \"tests/test_configs/latest.yaml\",\n+ \"--command\",\n+ self.command,\n+ \"--command\",\n+ 'echo \"Hello World\"',\n+ \"--debug\",\n+ ],\n+ return_stdout=True,\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; 
ls; echo \"Hello World\" --worker all',\n+ )\n+\n+ def test_with_config_file_and_command_file(self):\n+ output = run_command(\n+ self.cmd\n+ + [\"--config_file\", \"tests/test_configs/latest.yaml\", \"--command_file\", self.command_file, \"--debug\"],\n+ return_stdout=True,\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all',\n+ )\n+\n+ def test_with_config_file_and_command_file_backward_compatibility(self):\n+ output = run_command(\n+ self.cmd\n+ + [\n+ \"--config_file\",\n+ \"tests/test_configs/0_12_0.yaml\",\n+ \"--command_file\",\n+ self.command_file,\n+ \"--tpu_zone\",\n+ self.tpu_zone,\n+ \"--tpu_name\",\n+ self.tpu_name,\n+ \"--debug\",\n+ ],\n+ return_stdout=True,\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all',\n+ )\n+\n+ def test_accelerate_install(self):\n+ output = run_command(\n+ self.cmd + [\"--config_file\", \"tests/test_configs/latest.yaml\", \"--install_accelerate\", \"--debug\"],\n+ return_stdout=True,\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all',\n+ )\n+\n+ def test_accelerate_install_version(self):\n+ output = run_command(\n+ self.cmd\n+ + [\n+ \"--config_file\",\n+ \"tests/test_configs/latest.yaml\",\n+ \"--install_accelerate\",\n+ \"--accelerate_version\",\n+ \"12.0.0\",\n+ \"--debug\",\n+ ],\n+ return_stdout=True,\n+ )\n+ self.assertEqual(\n+ self.clean_output(output),\n+ f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all',\n+ )\ndiff --git a/tests/test_configs/latest.yaml b/tests/test_configs/latest.yaml\nindex 87b294cf8..de6be03a4 100644\n--- a/tests/test_configs/latest.yaml\n+++ b/tests/test_configs/latest.yaml\n@@ -15,3 +15,7 @@ num_processes: 1\n rdzv_backend: static\n same_network: true\n use_cpu: false\n+tpu_name: 'test-tpu'\n+tpu_zone: 'us-central1-a'\n+command: null\n+command_file: tests/test_samples/test_command_file.sh\n\\ No newline at end of file\ndiff --git a/tests/test_samples/test_command_file.sh b/tests/test_samples/test_command_file.sh\nnew file mode 100644\nindex 000000000..592a7d532\n--- /dev/null\n+++ b/tests/test_samples/test_command_file.sh\n@@ -0,0 +1,2 @@\n+echo \"hello world\"\n+echo \"this is a second command\"\n\\ No newline at end of file\n", "code_comments": [ { "body": "```suggestion\r\n \"Do you have code you wish to run on startup in each pod? [yes/NO]: \",\r\n```", "diff_hunk": "@@ -341,6 +347,50 @@ def get_cluster_input():\n \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\n default=\"main\",\n )\n+ use_cluster = _ask_field(\n+ \"Are you using a TPU cluster? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_cluster:\n+ tpu_name = _ask_field(\n+ \"What is the name of your TPU cluster? \",\n+ default=None,\n+ error_message=\"Please enter the name of your TPU cluster.\",\n+ )\n+ tpu_zone = _ask_field(\n+ \"What is the zone of your TPU cluster? 
\",\n+ default=None,\n+ error_message=\"Please enter the zone of your TPU cluster.\",\n+ )\n+ run_commands = _ask_field(\n+ \"Do you have code you wish to be ran on startup in each pod? [yes/NO]: \",", "from_author": false }, { "body": "We can allow for mix and match by appending commands from the `args.command_file` to the `args.command`? Would it be better than raising error?", "diff_hunk": "@@ -0,0 +1,152 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import subprocess\n+\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\n+from packaging.version import Version, parse\n+\n+\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\n+\n+\n+def pod_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for accelerate.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--pod_config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for the pod.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--command_file\",\n+ default=None,\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\n+ )\n+ parser.add_argument(\n+ \"--command\",\n+ action=\"append\",\n+ nargs=\"+\",\n+ help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_name\",\n+ default=None,\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_zone\",\n+ default=None,\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--install_accelerate\",\n+ action=\"store_true\",\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\n+ )\n+ parser.add_argument(\n+ \"--accelerate_version\",\n+ default=\"latest\",\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. 
Specify 'dev' to install from GitHub.\",\n+ )\n+ parser.add_argument(\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=pod_launcher)\n+ return parser\n+\n+\n+def pod_launcher(args):\n+ defaults = None\n+\n+ # Get the default from the config file if it exists.\n+ if args.config_file is not None or os.path.isfile(default_config_file):\n+ defaults = load_config_from_file(args.config_file)\n+ if not args.command_file and defaults.command_file is not None and not args.command:\n+ args.command_file = defaults.command_file\n+ if not args.command and defaults.command is not None:\n+ args.command = defaults.command\n+ if not args.tpu_name:\n+ args.tpu_name = defaults.tpu_name\n+ if not args.tpu_zone:\n+ args.tpu_zone = defaults.tpu_zone\n+ if args.accelerate_version == \"dev\":\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\n+ elif args.accelerate_version == \"latest\":\n+ args.accelerate_version = \"accelerate -U\"\n+ elif isinstance(parse(args.accelerate_version), Version):\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\n+\n+ if not args.command_file and not args.command:\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")", "from_author": false }, { "body": "```suggestion\r\n print(f\"Running {' '.join(cmd)}\")\r\n```", "diff_hunk": "@@ -0,0 +1,152 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import subprocess\n+\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\n+from packaging.version import Version, parse\n+\n+\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\n+\n+\n+def pod_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for accelerate.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--pod_config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for the pod.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--command_file\",\n+ default=None,\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\n+ )\n+ parser.add_argument(\n+ \"--command\",\n+ action=\"append\",\n+ nargs=\"+\",\n+ help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_name\",\n+ default=None,\n+ help=\"The name of the TPU to use. 
If not specified, will use the TPU specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_zone\",\n+ default=None,\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--install_accelerate\",\n+ action=\"store_true\",\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\n+ )\n+ parser.add_argument(\n+ \"--accelerate_version\",\n+ default=\"latest\",\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.\",\n+ )\n+ parser.add_argument(\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=pod_launcher)\n+ return parser\n+\n+\n+def pod_launcher(args):\n+ defaults = None\n+\n+ # Get the default from the config file if it exists.\n+ if args.config_file is not None or os.path.isfile(default_config_file):\n+ defaults = load_config_from_file(args.config_file)\n+ if not args.command_file and defaults.command_file is not None and not args.command:\n+ args.command_file = defaults.command_file\n+ if not args.command and defaults.command is not None:\n+ args.command = defaults.command\n+ if not args.tpu_name:\n+ args.tpu_name = defaults.tpu_name\n+ if not args.tpu_zone:\n+ args.tpu_zone = defaults.tpu_zone\n+ if args.accelerate_version == \"dev\":\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\n+ elif args.accelerate_version == \"latest\":\n+ args.accelerate_version = \"accelerate -U\"\n+ elif isinstance(parse(args.accelerate_version), Version):\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\n+\n+ if not args.command_file and not args.command:\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")\n+\n+ if args.command_file:\n+ with open(args.command_file, \"r\") as f:\n+ args.command = [f.read().splitlines()]\n+\n+ # To turn list of lists into list of strings\n+ args.command = [line for cmd in args.command for line in cmd]\n+ # Default to the shared folder and install accelerate\n+ new_cmd = [\"cd /usr/share\"]\n+ if args.install_accelerate:\n+ new_cmd += [f\"pip install {args.accelerate_version}\"]\n+ new_cmd += args.command\n+ args.command = \"; \".join(new_cmd)\n+\n+ # Then send it to gcloud\n+ # Eventually try to use google-api-core to do this instead of subprocess\n+ cmd = [\n+ \"gcloud\",\n+ \"compute\",\n+ \"tpus\",\n+ \"tpu-vm\",\n+ \"ssh\",\n+ args.tpu_name,\n+ \"--zone\",\n+ args.tpu_zone,\n+ \"--command\",\n+ args.command,\n+ \"--worker\",\n+ \"all\",\n+ ]\n+ if args.debug:\n+ print(cmd)", "from_author": false }, { "body": "That's a test class which is inherently private, thus we don't need to add all those `_` that hurt readability :-) ", "diff_hunk": "@@ -63,3 +65,155 @@ def test_config_compatibility(self):\n execute_subprocess_async(\n self.base_cmd + [\"--config_file\", str(config), self.test_file_path], env=os.environ.copy()\n )\n+\n+\n+class PodConfigTester(unittest.TestCase):\n+ \"\"\"\n+ Test case for verifying the `accelerate pod-config` CLI passes the right `gcloud` command.\n+ \"\"\"\n+\n+ _tpu_name = \"test-tpu\"\n+ _tpu_zone = \"us-central1-a\"\n+ _command = \"ls\"\n+ cmd = [\"accelerate\", \"pod-config\"]\n+ _base_output = \"cd /usr/share\"\n+ _command_file = \"tests/test_samples/test_command_file.sh\"", "from_author": false }, { 
"body": "With only allowing one vs the other it keeps the API simplistic, as otherwise we then have to worry about when in the order should the command come from in the CLI vs the bash script and that can be confusing to users. However in this particular case the check is just to ensure that you've passed *some* command to run in :) ", "diff_hunk": "@@ -0,0 +1,152 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import subprocess\n+\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\n+from packaging.version import Version, parse\n+\n+\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\n+\n+\n+def pod_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for accelerate.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--pod_config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for the pod.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--command_file\",\n+ default=None,\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\n+ )\n+ parser.add_argument(\n+ \"--command\",\n+ action=\"append\",\n+ nargs=\"+\",\n+ help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_name\",\n+ default=None,\n+ help=\"The name of the TPU to use. If not specified, will use the TPU specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_zone\",\n+ default=None,\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--install_accelerate\",\n+ action=\"store_true\",\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\n+ )\n+ parser.add_argument(\n+ \"--accelerate_version\",\n+ default=\"latest\",\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. 
Specify 'dev' to install from GitHub.\",\n+ )\n+ parser.add_argument(\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=pod_launcher)\n+ return parser\n+\n+\n+def pod_launcher(args):\n+ defaults = None\n+\n+ # Get the default from the config file if it exists.\n+ if args.config_file is not None or os.path.isfile(default_config_file):\n+ defaults = load_config_from_file(args.config_file)\n+ if not args.command_file and defaults.command_file is not None and not args.command:\n+ args.command_file = defaults.command_file\n+ if not args.command and defaults.command is not None:\n+ args.command = defaults.command\n+ if not args.tpu_name:\n+ args.tpu_name = defaults.tpu_name\n+ if not args.tpu_zone:\n+ args.tpu_zone = defaults.tpu_zone\n+ if args.accelerate_version == \"dev\":\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\n+ elif args.accelerate_version == \"latest\":\n+ args.accelerate_version = \"accelerate -U\"\n+ elif isinstance(parse(args.accelerate_version), Version):\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\n+\n+ if not args.command_file and not args.command:\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")", "from_author": true }, { "body": "We can keep this idea open though and if users want that we can enable it", "diff_hunk": "@@ -0,0 +1,152 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import subprocess\n+\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file\n+from packaging.version import Version, parse\n+\n+\n+_description = \"Run commands across a pod of TPU VMs for initial setup before running `accelerate launch`. Will also install Accelerate on the pod.\"\n+\n+\n+def pod_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"pod-config\", description=_description)\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate pod-config command\", description=_description)\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for accelerate.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--pod_config_file\",\n+ type=str,\n+ default=None,\n+ help=\"Path to the config file to use for the pod.\",\n+ )\n+\n+ parser.add_argument(\n+ \"--command_file\",\n+ default=None,\n+ help=\"The path to the file containing the commands to run on the pod on startup.\",\n+ )\n+ parser.add_argument(\n+ \"--command\",\n+ action=\"append\",\n+ nargs=\"+\",\n+ help=\"A command to run on the pod. If not specified, will use the command specified in the command file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_name\",\n+ default=None,\n+ help=\"The name of the TPU to use. 
If not specified, will use the TPU specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--tpu_zone\",\n+ default=None,\n+ help=\"The zone of the TPU to use. If not specified, will use the zone specified in the config file.\",\n+ )\n+ parser.add_argument(\n+ \"--install_accelerate\",\n+ action=\"store_true\",\n+ help=\"Whether to install accelerate on the pod. Defaults to False.\",\n+ )\n+ parser.add_argument(\n+ \"--accelerate_version\",\n+ default=\"latest\",\n+ help=\"The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.\",\n+ )\n+ parser.add_argument(\n+ \"--debug\", action=\"store_true\", help=\"If set, will print the command that would be run instead of running it.\"\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=pod_launcher)\n+ return parser\n+\n+\n+def pod_launcher(args):\n+ defaults = None\n+\n+ # Get the default from the config file if it exists.\n+ if args.config_file is not None or os.path.isfile(default_config_file):\n+ defaults = load_config_from_file(args.config_file)\n+ if not args.command_file and defaults.command_file is not None and not args.command:\n+ args.command_file = defaults.command_file\n+ if not args.command and defaults.command is not None:\n+ args.command = defaults.command\n+ if not args.tpu_name:\n+ args.tpu_name = defaults.tpu_name\n+ if not args.tpu_zone:\n+ args.tpu_zone = defaults.tpu_zone\n+ if args.accelerate_version == \"dev\":\n+ args.accelerate_version = \"git+https://github.com/huggingface/accelerate.git\"\n+ elif args.accelerate_version == \"latest\":\n+ args.accelerate_version = \"accelerate -U\"\n+ elif isinstance(parse(args.accelerate_version), Version):\n+ args.accelerate_version = f\"accelerate=={args.accelerate_version}\"\n+\n+ if not args.command_file and not args.command:\n+ raise ValueError(\"You must specify either a command file or a command to run on the pod.\")", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/802", "pr_id": 1105709188 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex b9f6c1089..7c8534e93 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -592,6 +592,12 @@ def deepspeed_launcher(args):\n num_machines = getattr(args, \"num_machines\")\n main_process_ip = getattr(args, \"main_process_ip\")\n main_process_port = getattr(args, \"main_process_port\")\n+\n+ # make sure launcher is not None\n+ if args.deepspeed_multinode_launcher is None:\n+ # set to default pdsh\n+ setattr(args, \"deepspeed_multinode_launcher\", DEEPSPEED_MULTINODE_LAUNCHERS[0])\n+\n if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n cmd = [\"deepspeed\", \"--no_local_rank\"]\n cmd.extend([\"--hostfile\", str(args.deepspeed_hostfile), \"--launcher\", str(args.deepspeed_multinode_launcher)])\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/800", "pr_id": 1105644322 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 83c6738af..e277e89a2 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ 
-245,11 +245,15 @@ def dispatch_model(\n check_device_map(model, device_map)\n \n if main_device is None:\n- main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\n+ if set(device_map.values()) == {\"cpu\"} or set(device_map.values()) == {\"cpu\", \"disk\"}:\n+ main_device = \"cpu\"\n+ else:\n+ main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\n \n- cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\n- if state_dict is None and len(cpu_modules) > 0:\n- state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\n+ if main_device != \"cpu\":\n+ cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\n+ if state_dict is None and len(cpu_modules) > 0:\n+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\n \n disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\n if offload_dir is None and len(disk_modules) > 0:\n@@ -266,7 +270,8 @@ def dispatch_model(\n execution_device = {\n name: main_device if device in [\"cpu\", \"disk\"] else device for name, device in device_map.items()\n }\n- offload = {name: device in [\"cpu\", \"disk\"] for name, device in device_map.items()}\n+ offloaded_devices = [\"disk\"] if main_device == \"cpu\" else [\"cpu\", \"disk\"]\n+ offload = {name: device in offloaded_devices for name, device in device_map.items()}\n save_folder = offload_dir if len(disk_modules) > 0 else None\n if state_dict is not None or save_folder is not None:\n weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=save_folder)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Is this expected to work on the `mps` device too? I've checked #596, but the only device I see here is `cpu`:\r\nhttps://github.com/huggingface/accelerate/blob/0de16441266ba9a799aa338aa75c73bf078429e9/src/accelerate/utils/modeling.py#L464\r\n\r\nFurthermore, `get_max_memory` doesn't seem to take it into account: https://github.com/huggingface/accelerate/blob/b816e258a95904736c8e3a5a2b14a18d5f407f4a/src/accelerate/utils/modeling.py#L275-L284\r\n\r\nAlternatively, is it possible to override using a custom `device_map`?\r\n\r\nReference: https://github.com/huggingface/diffusers/pull/1042", "from_author": false }, { "body": "No we don't support the \"mps\" device at all in big model inference. `device_map=\"auto\"` will load the model on the CPU and then the user can't move them to the MPS device if they want.", "from_author": true }, { "body": "Ok understood, thanks! However, we get errors after moving to the MPS device, if we use `device_map=\"auto\"`. I'm not sure what's causing it, but some tensors are being moved to the CPU when performing inference. I'll investigate further.", "from_author": false }, { "body": "That's the side effect of using `device_map=\"auto\"` @Narsil was warning you about. It does more than just load the model with empty weights but adds hooks to the model.", "from_author": true }, { "body": "Yeah I saw the hooks while stepping with the debugger. One of the reasons why we want to use this is for easier communication (just use `accelerate` and `device_map=\"auto\"`). 
We'll see what we can do :)", "from_author": false }, { "body": "@pcuenca Loading weights efficiently is like 2 functions from `accelerate`.\r\n\r\nhttps://github.com/huggingface/accelerate/blob/main/src/accelerate/big_modeling.py#L35-L110\r\n\r\nAnd loading after the fact https://github.com/huggingface/transformers_bloom_parallel/blob/main/generate.py#L211-L270\r\n(Couldn't fetch rapidly from `accelerate` but it has some more complete and robust function).\r\n\r\nTo be completely honest, I feel like all this should be in Pytorch proper. the `meta` device is really a bad workaround.\r\n\r\nIn the meantime maybe instead of pushing towards `device_map=\"auto\"` you could instead use the 2 accelerate functions and wrapping them up nicely in `from_pretrained` ?", "from_author": false }, { "body": "> In the meantime maybe instead of pushing towards `device_map=\"auto\"` you could instead use the 2 accelerate functions and wrapping them up nicely in `from_pretrained` ?\r\n\r\nThat's what I was thinking, yes! What I'm not sure about is whether to apply those only for the `mps` device (and keep using `device_map` in the others), or something else. I'll do some experiments to see what it looks like in practice. Thanks a lot!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/797", "pr_id": 1103202807 }, { "diff": "diff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nindex 750ff9d57..bc0fcdef1 100644\n--- a/src/accelerate/utils/offload.py\n+++ b/src/accelerate/utils/offload.py\n@@ -178,5 +178,13 @@ def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule\n \"\"\"\n result = {}\n for module_name in submodule_names:\n- result.update({key: param for key, param in state_dict.items() if key.startswith(module_name)})\n+ # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the\n+ # submodules that could being like module_name (transformers.h.1 and transformers.h.10 for instance)\n+ result.update(\n+ {\n+ key: param\n+ for key, param in state_dict.items()\n+ if key == module_name or key.startswith(module_name + \".\")\n+ }\n+ )\n return result\ndiff --git a/tests/test_offload.py b/tests/test_offload.py\nindex 765a9685d..a9ebd36ee 100644\n--- a/tests/test_offload.py\n+++ b/tests/test_offload.py\n@@ -21,6 +21,7 @@\n \n from accelerate.utils import (\n OffloadedWeightsLoader,\n+ extract_submodules_state_dict,\n is_torch_version,\n load_offloaded_weight,\n offload_state_dict,\n@@ -105,3 +106,12 @@ def test_offload_weights_loader(self):\n self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))\n for key, param in state_dict.items():\n self.assertTrue(torch.allclose(param, weight_map[key]))\n+\n+ def test_extract_submodules_state_dict(self):\n+ state_dict = {\"a.1\": 0, \"a.10\": 1, \"a.2\": 2}\n+ extracted = extract_submodules_state_dict(state_dict, [\"a.1\", \"a.2\"])\n+ self.assertDictEqual(extracted, {\"a.1\": 0, \"a.2\": 2})\n+\n+ state_dict = {\"a.1.a\": 0, \"a.10.a\": 1, \"a.2.a\": 2}\n+ extracted = extract_submodules_state_dict(state_dict, [\"a.1\", \"a.2\"])\n+ self.assertDictEqual(extracted, {\"a.1.a\": 0, \"a.2.a\": 2})\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/795", "pr_id": 1102193335 }, { "diff": "diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 
a438ab45b..3a31fed93 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -15,15 +15,23 @@\n # Expectation:\n # Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n \n+import json\n import os\n import time\n from abc import ABCMeta, abstractmethod, abstractproperty\n-from typing import List, Optional, Union\n+from typing import Any, Dict, List, Optional, Union\n \n import yaml\n \n from .logging import get_logger\n-from .utils import LoggerType, is_aim_available, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+from .utils import (\n+ LoggerType,\n+ is_aim_available,\n+ is_comet_ml_available,\n+ is_mlflow_available,\n+ is_tensorboard_available,\n+ is_wandb_available,\n+)\n \n \n _available_trackers = []\n@@ -48,6 +56,10 @@\n \n _available_trackers.append(LoggerType.AIM)\n \n+if is_mlflow_available():\n+ import mlflow\n+\n+ _available_trackers.append(LoggerType.MLFLOW)\n \n logger = get_logger(__name__)\n \n@@ -398,9 +410,140 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`, *optional*):\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str` or `os.PathLike`, defaults to `\".\"`):\n+ Location for mlflow logs to be stored.\n+ run_id (`str`, *optional*):\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\n+ tags (`Dict[str, str]`, *optional*):\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\n+ nested_run (`bool`, *optional*, defaults to `False`):\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\n+ MLFLOW_NESTED_RUN has priority over this argument.\n+ run_name (`str`, *optional*):\n+ Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified.\n+ description (`str`, *optional*):\n+ An optional string that populates the description box of the run. If a run is being resumed, the\n+ description is set on the resumed run. 
If a new run is being created, the description is set on the new\n+ run.\n+ \"\"\"\n+\n+ name = \"mlflow\"\n+ requires_logging_directory = True\n+\n+ def __init__(\n+ self,\n+ experiment_name: str = None,\n+ logging_dir: Optional[Union[str, os.PathLike]] = \".\",\n+ run_id: Optional[str] = None,\n+ tags: Optional[Union[Dict[str, Any], str]] = None,\n+ nested_run: Optional[bool] = False,\n+ run_name: Optional[str] = None,\n+ description: Optional[str] = None,\n+ ):\n+\n+ experiment_name = os.getenv(\"MLFLOW_EXPERIMENT_NAME\", experiment_name)\n+ run_id = os.getenv(\"MLFLOW_RUN_ID\", run_id)\n+ tags = os.getenv(\"MLFLOW_TAGS\", tags)\n+ if isinstance(tags, str):\n+ tags = json.loads(tags)\n+\n+ nested_run = os.getenv(\"MLFLOW_NESTED_RUN\", nested_run)\n+\n+ experiment_id = mlflow.create_experiment(\n+ name=experiment_name,\n+ artifact_location=logging_dir,\n+ tags=tags,\n+ )\n+\n+ self.active_run = mlflow.start_run(\n+ run_id=run_id,\n+ experiment_id=experiment_id,\n+ run_name=run_name,\n+ nested=nested_run,\n+ tags=tags,\n+ description=description,\n+ )\n+\n+ logger.debug(f\"Initialized mlflow experiment {experiment_name}\")\n+ logger.debug(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ @property\n+ def tracker(self):\n+ return self.active_run\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs.\n+ \"\"\"\n+\n+ for name, value in list(values.items()):\n+ # internally, all values are converted to str in MLflow\n+ if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:\n+ logger.warning(\n+ f'Trainer is attempting to log a value of \"{value}\" for key \"{name}\" as a parameter. MLflow\\'s'\n+ f\" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute.\"\n+ )\n+ del values[name]\n+\n+ values_list = list(values.items())\n+\n+ # MLflow cannot log more than 100 values in one go, so we have to split it\n+ for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH):\n+ mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))\n+\n+ logger.debug(\"Stored initial configuration hyperparameters to MLflow\")\n+\n+ def log(self, values: dict, step: Optional[int]):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ metrics = {}\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ metrics[k] = v\n+ else:\n+ logger.warning(\n+ f'MLflowTracker is attempting to log a value of \"{v}\" of type {type(v)} for key \"{k}\" as a metric. 
'\n+ \"MLflow's log_metric() only accepts float and int types so we dropped this attribute.\"\n+ )\n+\n+ mlflow.log_metrics(metrics, step=step)\n+ logger.debug(\"Successfully logged to mlflow\")\n+\n+ def finish(self):\n+ \"\"\"\n+ End the active MLflow run.\n+ \"\"\"\n+ mlflow.end_run()\n+\n+\n LOGGER_TYPE_TO_CLASS = {\n \"aim\": AimTracker,\n \"comet_ml\": CometMLTracker,\n+ \"mlflow\": MLflowTracker,\n \"tensorboard\": TensorBoardTracker,\n \"wandb\": WandBTracker,\n }\n@@ -424,6 +567,7 @@ def filter_trackers(\n - `\"tensorboard\"`\n - `\"wandb\"`\n - `\"comet_ml\"`\n+ - `\"mlflow\"`\n If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex b22518f9a..a017a2ad5 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -31,6 +31,7 @@\n is_datasets_available,\n is_deepspeed_available,\n is_megatron_lm_available,\n+ is_mlflow_available,\n is_rich_available,\n is_sagemaker_available,\n is_tensorboard_available,\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 8227fa5bc..7dd8798e9 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -203,6 +203,7 @@ class LoggerType(BaseEnum):\n TENSORBOARD = \"tensorboard\"\n WANDB = \"wandb\"\n COMETML = \"comet_ml\"\n+ MLFLOW = \"mlflow\"\n \n \n class PrecisionType(BaseEnum):\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex c33258a92..892ff63f8 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -136,3 +136,7 @@ def is_sagemaker_available():\n \n def is_tqdm_available():\n return importlib.util.find_spec(\"tqdm\") is not None\n+\n+\n+def is_mlflow_available():\n+ return importlib.util.find_spec(\"mlflow\") is not None\n", "code_comments": [ { "body": "```suggestion\r\n tags (`Dict[str, str]`, *optional*):\r\n```", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str`, `os.PathLike`):\n+ Location for mlflow logs to be stored.\n+ run_id (`str`):\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\n+ tags (`dict`, `str`):", "from_author": false }, { "body": "```suggestion\r\n run_id (`str`, *optional*):\r\n```", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):\n+ Name of the experiment. 
Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str`, `os.PathLike`):\n+ Location for mlflow logs to be stored.\n+ run_id (`str`):", "from_author": false }, { "body": "```suggestion\r\n logging_dir (`str` or `os.PathLike`, defaults to `\".\"`):\r\n```", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str`, `os.PathLike`):", "from_author": false }, { "body": "```suggestion\r\n experiment_name (`str`, *optional*):\r\n```", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):", "from_author": false }, { "body": "```suggestion\r\n nested_run (`bool`, *optional*, defaults to `False`):\r\n```", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str`, `os.PathLike`):\n+ Location for mlflow logs to be stored.\n+ run_id (`str`):\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\n+ tags (`dict`, `str`):\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\n+ nested_run (`bool`):", "from_author": false }, { "body": "```suggestion\r\n run_name (`str`, *optional*):\r\n```", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str`, `os.PathLike`):\n+ Location for mlflow logs to be stored.\n+ run_id (`str`):\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\n+ tags (`dict`, `str`):\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\n+ set on the new run. 
Environment variable MLFLOW_TAGS has priority over this argument.\n+ nested_run (`bool`):\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\n+ MLFLOW_NESTED_RUN has priority over this argument.\n+ run_name (`str`):", "from_author": false }, { "body": "```suggestion\r\n Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified.\r\n```", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str`, `os.PathLike`):\n+ Location for mlflow logs to be stored.\n+ run_id (`str`):\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\n+ tags (`dict`, `str`):\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\n+ nested_run (`bool`):\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\n+ MLFLOW_NESTED_RUN has priority over this argument.\n+ run_name (`str`):\n+ Name of new run (stored as a mlflow.runName tag). Used only when run_id is unspecified.", "from_author": false }, { "body": "```suggestion\r\n description (`str`, *optional*):\r\n```", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str`, `os.PathLike`):\n+ Location for mlflow logs to be stored.\n+ run_id (`str`):\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\n+ tags (`dict`, `str`):\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\n+ nested_run (`bool`):\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\n+ MLFLOW_NESTED_RUN has priority over this argument.\n+ run_name (`str`):\n+ Name of new run (stored as a mlflow.runName tag). 
Used only when run_id is unspecified.\n+ description (`str`):", "from_author": false }, { "body": "Instead of storing those constants, let's use them directly when necessary (otherwise the reader has to figure out what those things are :-) 0", "diff_hunk": "@@ -398,9 +410,143 @@ def finish(self):\n self.writer.close()\n \n \n+class MLflowTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n+\n+ Args:\n+ experiment_name (`str`):\n+ Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n+ logging_dir (`str`, `os.PathLike`):\n+ Location for mlflow logs to be stored.\n+ run_id (`str`):\n+ If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s\n+ end time is unset and its status is set to running, but the run’s other attributes (source_version,\n+ source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\n+ tags (`dict`, `str`):\n+ An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\n+ run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\n+ set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\n+ nested_run (`bool`):\n+ Controls whether run is nested in parent run. True creates a nested run. Environment variable\n+ MLFLOW_NESTED_RUN has priority over this argument.\n+ run_name (`str`):\n+ Name of new run (stored as a mlflow.runName tag). Used only when run_id is unspecified.\n+ description (`str`):\n+ An optional string that populates the description box of the run. If a run is being resumed, the\n+ description is set on the resumed run. 
If a new run is being created, the description is set on the new\n+ run.\n+ \"\"\"\n+\n+ name = \"mlflow\"\n+ requires_logging_directory = True\n+\n+ def __init__(\n+ self,\n+ experiment_name: str = None,\n+ logging_dir: Optional[Union[str, os.PathLike]] = \".\",\n+ run_id: Optional[str] = None,\n+ tags: Optional[Union[Dict[str, Any], str]] = None,\n+ nested_run: Optional[bool] = False,\n+ run_name: Optional[str] = None,\n+ description: Optional[str] = None,\n+ ):\n+\n+ self._MAX_PARAM_VAL_LENGTH = mlflow.utils.validation.MAX_PARAM_VAL_LENGTH\n+ self._MAX_PARAMS_TAGS_PER_BATCH = mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@sgugger, how is it now?", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/794", "pr_id": 1101114625 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex c14833557..e471731a4 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -476,7 +476,11 @@ def infer_auto_device_map(\n current_memory_used = 0\n \n # Direct submodules and parameters\n- modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ modules_to_treat = (\n+ list(model.named_parameters(recurse=False))\n+ + list(model.named_children())\n+ + list(model.named_buffers(recurse=False))\n+ )\n # Initialize maximum largest layer, to know which space to keep in memory\n max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "So if I understood it correctly, if you have some modules such as `nn.BatchNorm` in your model (as it is done in the `accelerate` CI test), the buffers `running_mean` and `running_var` will not be stored inside `model._buffers` but in `model.named_buffers()`. That is why I had to \"filter out\" the buffers by considering only the ones that are inside `model._buffers` and `model.named_buffers()` \r\n\r\nHere is an example that I have quickly tried: \r\n```\r\nmodel = nn.Sequential(nn.Linear(1, 1), nn.BatchNorm1d(1), nn.Embedding(1, 1), nn.LayerNorm(1)) \r\nprint(list(model.named_buffers()))\r\n>>>[('1.running_mean', tensor([0.])), ('1.running_var', tensor([1.])), ('1.num_batches_tracked', tensor(0))]\r\nprint(list(model._buffers))\r\n>>> []\r\nmodel.register_buffer(\"position_bias\", torch.ones(1))\r\nprint(list(model._buffers))\r\n>>> ['position_bias']\r\nprint(list(model.named_buffers()))\r\n>>> [('position_bias', tensor([1.])), ('1.running_mean', tensor([0.])), ('1.running_var', tensor([1.])), ('1.num_batches_tracked', tensor(0))]\r\n```\r\n\r\n", "from_author": true }, { "body": "I think in this case, it's just the difference between `named_buffers(recurse=True)` and `named_buffers(recurse=False)`. I'm not convinced this fix is the right fix, so would like to learn more what is failing.", "from_author": false }, { "body": "Ah yes I see, you're probably right here! 
Let me dig a bit more and get back to you here ", "from_author": true }, { "body": "@sgugger I might have more clue on what is failing \r\nI think that the problem comes from the fact that the `infer_auto_device_map` does take into account only modules and submodules, I have made a script below to better illustrate the problem\r\n\r\n```\r\nimport torch.nn as nn\r\nimport torch\r\nfrom accelerate.utils import infer_auto_device_map\r\nfrom accelerate.big_modeling import dispatch_model\r\n\r\nclass SubModule(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.register_buffer(\"position_bias\", torch.ones(1, 1000))\r\n\r\nclass Model(nn.Module):\r\n def __init__(self, wrap_module=True):\r\n super().__init__()\r\n self.l1 = nn.Linear(1000, 1000)\r\n self.l2 = nn.Linear(1000, 1000)\r\n self.l3 = nn.Linear(1000, 1000)\r\n\r\n self.bn1 = nn.BatchNorm1d(1000)\r\n self.bn2 = nn.BatchNorm1d(1000)\r\n\r\n if wrap_module:\r\n self.position_bias = SubModule()\r\n else:\r\n self.register_buffer(\"position_bias\", torch.ones(1, 1000))\r\n\r\n# Test 1: wrapping with a module - this will pass\r\nmodel = Model()\r\ndevice_map = infer_auto_device_map(model, {0:\"10MB\", \"cpu\":\"100MB\"})\r\nmodel = dispatch_model(model, device_map)\r\n\r\n# Test 2: below will fail\r\nmodel = Model(wrap_module=False)\r\ndevice_map = infer_auto_device_map(model, {0:\"10MB\", \"cpu\":\"100MB\"})\r\nmodel = dispatch_model(model, device_map)\r\n```\r\nLet me know what do you think! \r\n\r\nI guess this failed for `BartPreTrainedModel` since the `position_bias` buffer is on the parent module itself", "from_author": true }, { "body": "Ah, in this case it looks very much like the problem #747 fixed for top-level parameters, so the fix should be pretty similar here too!", "from_author": false }, { "body": "The whole testing suite (including slow tests) is green! 🟒 Merging ! 
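For concreteness, a minimal sketch of the `recurse` behaviour at play in this thread (plain PyTorch only; the module layout and the `position_bias` buffer name are illustrative, not taken from the PR): a buffer registered directly on the parent module is the only thing `named_buffers(recurse=False)` returns, which is the kind of top-level buffer the `infer_auto_device_map` change above now adds to the modules to treat.

```python
import torch
import torch.nn as nn

# Illustrative model: one submodule with buffers (BatchNorm running stats)
# plus one buffer registered on the parent module itself.
model = nn.Sequential(nn.Linear(4, 4), nn.BatchNorm1d(4))
model.register_buffer("position_bias", torch.ones(1, 4))

# recurse=True walks into submodules, so BatchNorm's buffers appear too.
print([name for name, _ in model.named_buffers(recurse=True)])
# ['position_bias', '1.running_mean', '1.running_var', '1.num_batches_tracked']

# recurse=False only reports buffers attached to the parent module,
# i.e. exactly the top-level buffer that was previously not listed.
print([name for name, _ in model.named_buffers(recurse=False)])
# ['position_bias']
```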
\r\n\r\n", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/792", "pr_id": 1100987961 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex cdae54af9..8fbc8758f 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -352,16 +352,16 @@ class DataLoaderShard(DataLoader):\n - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n \n- def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwargs):\n+ def __init__(self, dataset, device=None, rng_types=None, synchronized_generator=None, **kwargs):\n super().__init__(dataset, **kwargs)\n self.device = device\n self.rng_types = rng_types\n- self.generator = generator\n+ self.synchronized_generator = synchronized_generator\n self.gradient_state = GradientState()\n \n def __iter__(self):\n if self.rng_types is not None:\n- synchronize_rng_states(self.rng_types, self.generator)\n+ synchronize_rng_states(self.rng_types, self.synchronized_generator)\n self.gradient_state._set_end_of_dataloader(False)\n try:\n length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n@@ -650,12 +650,12 @@ def prepare_data_loader(\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n sampler_is_batch_sampler = False\n- generator = getattr(dataloader, \"generator\", None)\n+ synchronized_generator = None\n # No change if no multiprocess\n if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:\n if isinstance(new_dataset, IterableDataset):\n if getattr(dataloader.dataset, \"generator\", None) is not None:\n- generator = dataloader.dataset.generator\n+ synchronized_generator = dataloader.dataset.generator\n new_dataset = IterableDatasetShard(\n new_dataset,\n batch_size=dataloader.batch_size,\n@@ -674,8 +674,7 @@ def prepare_data_loader(\n if hasattr(sampler, \"generator\"):\n if sampler.generator is None:\n sampler.generator = torch.Generator()\n- generator = sampler.generator\n- generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\n+ synchronized_generator = sampler.generator\n \n batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler\n new_batch_sampler = BatchSamplerShard(\n@@ -692,10 +691,9 @@ def prepare_data_loader(\n \"sampler\",\n \"batch_sampler\",\n \"drop_last\",\n- \"generator\",\n ]\n \n- if rng_types is not None and generator is None and \"generator\" in rng_types:\n+ if rng_types is not None and synchronized_generator is None and \"generator\" in rng_types:\n rng_types.remove(\"generator\")\n \n kwargs = {\n@@ -710,6 +708,7 @@ def prepare_data_loader(\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\n \n if dispatch_batches:\n+ kwargs.pop(\"generator\")\n dataloader = DataLoaderDispatcher(\n new_dataset,\n split_batches=split_batches,\n@@ -722,9 +721,9 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n- batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\n+ batch_size=dataloader.batch_size,\n rng_types=rng_types,\n- generator=generator,\n+ synchronized_generator=synchronized_generator,\n 
**kwargs,\n )\n else:\n@@ -733,7 +732,7 @@ def prepare_data_loader(\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n batch_sampler=new_batch_sampler,\n rng_types=rng_types,\n- generator=generator,\n+ synchronized_generator=synchronized_generator,\n **kwargs,\n )\n \n", "code_comments": [ { "body": "We should have an else branch here in case `self.rng_types` contains something different than `\"generator\"`, by sending `None` for the generator argument (since we have removed `\"generator\"` from the rng_types at this stage, if there is none available.", "diff_hunk": "@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n \n def __iter__(self):\n if self.rng_types is not None:\n- synchronize_rng_states(self.rng_types, self.generator)\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\n+ sampler = (\n+ batch_sampler.batch_sampler.sampler\n+ if hasattr(batch_sampler, \"batch_sampler\")\n+ else batch_sampler.sampler\n+ )\n+ if hasattr(sampler, \"generator\"):\n+ generator = sampler.generator\n+ synchronize_rng_states(self.rng_types, generator)\n self.gradient_state._set_end_of_dataloader(False)", "from_author": false }, { "body": "I wonder why `dataloader.sampler` can be `BatchSampler`?", "diff_hunk": "@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n \n def __iter__(self):\n if self.rng_types is not None:\n- synchronize_rng_states(self.rng_types, self.generator)\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler", "from_author": false }, { "body": "See the example script above in the PR, it has various scenarios. ", "diff_hunk": "@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n \n def __iter__(self):\n if self.rng_types is not None:\n- synchronize_rng_states(self.rng_types, self.generator)\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler", "from_author": true }, { "body": "see https://github.com/huggingface/accelerate/issues/679, it helps reduce the number of queries\r\n", "diff_hunk": "@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n \n def __iter__(self):\n if self.rng_types is not None:\n- synchronize_rng_states(self.rng_types, self.generator)\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler", "from_author": true }, { "body": "https://huggingface.co/docs/datasets/v2.4.0/en/use_with_pytorch#use-a-batchsampler", "diff_hunk": "@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n \n def __iter__(self):\n if self.rng_types is not None:\n- synchronize_rng_states(self.rng_types, self.generator)\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler", "from_author": true }, { "body": "Wow! 
Thanks a lot!\r\nI spent hours finding a way to query my dataset with a list of indices instead of collating data outside the dataset(so that I can write batching logic in the dataset/reduce the number of queries), and failed(thus I was forced to use DataLoader as a sampler(sharded by accelerate), as mentioned in #624 ).\r\nNow I know I can just use batch sampler as sampler without passing batch size to `DataLoader`.\r\nThanks!!!πŸ˜πŸ‘", "diff_hunk": "@@ -361,7 +361,15 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n \n def __iter__(self):\n if self.rng_types is not None:\n- synchronize_rng_states(self.rng_types, self.generator)\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler", "from_author": false }, { "body": "We should probably leave this one out and pass it as the generator (as initially suggested in your PR).", "diff_hunk": "@@ -650,12 +650,12 @@ def prepare_data_loader(\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n sampler_is_batch_sampler = False\n- generator = getattr(dataloader, \"generator\", None)\n+ synchronized_generator = getattr(dataloader, \"generator\", None)", "from_author": false }, { "body": "Here we should pass the main generator of the original dataloader if there was one (see comment above).", "diff_hunk": "@@ -722,9 +721,9 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n- batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\n+ batch_size=dataloader.batch_size,\n rng_types=rng_types,\n- generator=generator,", "from_author": false }, { "body": "Same there.", "diff_hunk": "@@ -733,7 +732,7 @@ def prepare_data_loader(\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n batch_sampler=new_batch_sampler,\n rng_types=rng_types,\n- generator=generator,", "from_author": false }, { "body": "I remove `\"generator\"` from `ignore_kwargs`. It should be enough.", "diff_hunk": "@@ -722,9 +721,9 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n- batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\n+ batch_size=dataloader.batch_size,\n rng_types=rng_types,\n- generator=generator,", "from_author": false }, { "body": "\"generator\" is removed from `ignore_kwargs`.", "diff_hunk": "@@ -650,12 +650,12 @@ def prepare_data_loader(\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n sampler_is_batch_sampler = False\n- generator = getattr(dataloader, \"generator\", None)\n+ synchronized_generator = getattr(dataloader, \"generator\", None)", "from_author": false }, { "body": "Yes, but this one should not be synchronized I think. 
Or do we want to default to the dataloader generator if tehre is no generator in the sampler?", "diff_hunk": "@@ -650,12 +650,12 @@ def prepare_data_loader(\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n sampler_is_batch_sampler = False\n- generator = getattr(dataloader, \"generator\", None)\n+ synchronized_generator = getattr(dataloader, \"generator\", None)", "from_author": false }, { "body": "Indeed, resolving comment.", "diff_hunk": "@@ -722,9 +721,9 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n- batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\n+ batch_size=dataloader.batch_size,\n rng_types=rng_types,\n- generator=generator,", "from_author": false }, { "body": "Sorry I didn't get your point. Yes, this line should be `synchronized_generator = None`, as #790 .", "diff_hunk": "@@ -650,12 +650,12 @@ def prepare_data_loader(\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n sampler_is_batch_sampler = False\n- generator = getattr(dataloader, \"generator\", None)\n+ synchronized_generator = getattr(dataloader, \"generator\", None)", "from_author": false }, { "body": "Oops! Yup, resolved the comment", "diff_hunk": "@@ -650,12 +650,12 @@ def prepare_data_loader(\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n sampler_is_batch_sampler = False\n- generator = getattr(dataloader, \"generator\", None)\n+ synchronized_generator = getattr(dataloader, \"generator\", None)", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "This PR currently will break \r\nhttps://github.com/huggingface/accelerate/blob/37b2aa0173fe9b6f46e7df07b92bb55e54b77eb3/src/accelerate/data_loader.py#L653\r\nhttps://github.com/huggingface/accelerate/blob/37b2aa0173fe9b6f46e7df07b92bb55e54b77eb3/src/accelerate/data_loader.py#L658\r\nhttps://github.com/huggingface/accelerate/blob/37b2aa0173fe9b6f46e7df07b92bb55e54b77eb3/src/accelerate/data_loader.py#L677\r\n(Of course https://github.com/huggingface/accelerate/blob/37b2aa0173fe9b6f46e7df07b92bb55e54b77eb3/src/accelerate/data_loader.py#L698 should be modified)\r\n\r\n---\r\n\r\nThis code duplication (for finding the sampler) also impairs readability instead of improving readability.\r\n```python\r\nbatch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\r\nsampler = (\r\n batch_sampler.batch_sampler.sampler\r\n if hasattr(batch_sampler, \"batch_sampler\")\r\n else batch_sampler.sampler\r\n)\r\nif hasattr(sampler, \"generator\"):\r\n generator = sampler.generator\r\n synchronize_rng_states(self.rng_types, generator)\r\n```\r\nI believe what needs to be synchronized is IterableDataset's generator(according to `IterableDatasetShard` implementation) OR sampler's generator. 
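For concreteness, a minimal sketch (plain PyTorch; seeds and dataset size are illustrative) of the two generators being discussed: the sampler's generator drives the shuffling order, which is the one worth keeping in sync across processes, while the `DataLoader`'s own `generator` is a separate RNG used, among other things, for worker seeding.

```python
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

dataset = TensorDataset(torch.arange(8))

# Two distinct RNGs: the sampler-level one decides the shuffling order,
# the DataLoader-level one is independent of that order.
sampler_generator = torch.Generator().manual_seed(42)
loader_generator = torch.Generator().manual_seed(0)

sampler = RandomSampler(dataset, generator=sampler_generator)
dl = DataLoader(dataset, sampler=sampler, batch_size=2, generator=loader_generator)

for (batch,) in dl:
    # The printed order depends only on sampler_generator; changing
    # loader_generator's seed leaves it unchanged.
    print(batch.tolist())
```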
We can store the synchronized generator in a private attribute.\r\n", "from_author": false }, { "body": "@YouJiacheng I am not seeing any breaks in all the lines you mention, which are completely orthogonal to the change suggested. As for the code duplication, let us worry about readability as maintainers :-) We cannot store the generator as a private attribute as it is fetched in two different functions.", "from_author": false }, { "body": "By \"break\", I means that these lines will become useless. And for `generator = dataloader.dataset.generator`, it is intended to synchronize the generator of IterableDataset. So after proposed change, IterableDataset's generator won't be synchronized.", "from_author": false }, { "body": "Ah!, I get what you mean, thanks for clarifying!\r\n\r\nWe should indeed make the difference between the `sampler_generator` (for batch samplers) or `dataset.generator` (in iterable dataset) and the Dataloader generator. The latter should be passed along again when creating the final DataLoader (or use the default if it's none) while the formers should be the one synchronized.\r\n\r\nThe check on `rng_types` should probably be moved to the init of `BatchSamplerShard`/`IterableDatasetShard`.", "from_author": false }, { "body": "It's hard to explain my proposal using text. So I open a parallel PR to show the code.(It is somewhat \"pseudocode\"/proof of concept, since I didn't test it).", "from_author": false }, { "body": "@YouJiacheng Thanks for taking the time to draft a PR to show your points, it's much clearer this way! I think we need to merge the two PRs somehow as they both contain important things the other has not.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/789", "pr_id": 1098848698 }, { "diff": "diff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex 0387109b6..ea8fffa51 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -43,12 +43,12 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n The port to use to communicate between processes when launching a multi-GPU training.\n \"\"\"\n # Are we in a google colab or a Kaggle Kernel?\n+ in_colab = False\n+ in_kaggle = False\n if any(key.startswith(\"KAGGLE\") for key in os.environ.keys()):\n- in_colab_or_kaggle = True\n+ in_kaggle = True\n elif \"IPython\" in sys.modules:\n- in_colab_or_kaggle = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n- else:\n- in_colab_or_kaggle = False\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n \n try:\n mixed_precision = PrecisionType(mixed_precision.lower())\n@@ -57,31 +57,29 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n )\n \n- if in_colab_or_kaggle:\n- if os.environ.get(\"TPU_NAME\", None) is not None:\n- # TPU launch\n- import torch_xla.distributed.xla_multiprocessing as xmp\n-\n- if len(AcceleratorState._shared_state) > 0:\n- raise ValueError(\n- \"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside \"\n- \"your training function. 
Restart your notebook and make sure no cells initializes an \"\n- \"`Accelerator`.\"\n- )\n- if num_processes is None:\n- num_processes = 8\n+ if (in_colab or in_kaggle) and (os.environ.get(\"TPU_NAME\", None) is not None):\n+ # TPU launch\n+ import torch_xla.distributed.xla_multiprocessing as xmp\n \n- launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n- print(f\"Launching a training on {num_processes} TPU cores.\")\n- xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n+ if len(AcceleratorState._shared_state) > 0:\n+ raise ValueError(\n+ \"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside \"\n+ \"your training function. Restart your notebook and make sure no cells initializes an \"\n+ \"`Accelerator`.\"\n+ )\n+ if num_processes is None:\n+ num_processes = 8\n+\n+ launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n+ print(f\"Launching a training on {num_processes} TPU cores.\")\n+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n+ elif in_colab:\n+ # No need for a distributed launch otherwise as it's either CPU or one GPU.\n+ if torch.cuda.is_available():\n+ print(\"Launching training on one GPU.\")\n else:\n- # No need for a distributed launch otherwise as it's either CPU or one GPU.\n- if torch.cuda.is_available():\n- print(\"Launching training on one GPU.\")\n- else:\n- print(\"Launching training on one CPU.\")\n- function(*args)\n-\n+ print(\"Launching training on one CPU.\")\n+ function(*args)\n else:\n if num_processes is None:\n raise ValueError(\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/783", "pr_id": 1094266566 }, { "diff": "diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json\nnew file mode 100644\nindex 000000000..9d44afde7\n--- /dev/null\n+++ b/.devcontainer/devcontainer.json\n@@ -0,0 +1,25 @@\n+// File only needed for VSCode users to have proper Docker based interpreters\n+{\n+ \"name\": \"accelerate_dev_environment\",\n+ \"build\": {\n+ // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment\n+ \"dockerfile\": \"../docker/accelerate-cpu/Dockerfile\"\n+// \"dockerfile\": \"../docker/accelerate-gpu/Dockerfile\"\n+ },\n+ \"runArgs\": [\n+ // ACTION NEEDED: uncomment the next line if your local machine has GPUs available\n+// \"--gpus\", \"all\",\n+ // Enable the docker container to access system resources\n+ \"--ipc\", \"host\"\n+ ],\n+ \"remoteEnv\": {\n+ \"PYTHONPATH\": \"${containerEnv:PATH}:${containerWorkspaceFolder}\"\n+ },\n+ \"extensions\": [\n+ // Ensure we have IntelliSense in VSCode when running inside container\n+ \"ms-python.python\"\n+ ],\n+ \"workspaceFolder\": \"/workspaces/accelerate\",\n+ // Need git for VSCode to color code modifications. 
Only runs when building environment.\n+ \"onCreateCommand\": \"apt-get update && apt-get install -y git && pip install -e '.[dev]'\"\n+}\n\\ No newline at end of file\ndiff --git a/CONTRIBUTING.md b/CONTRIBUTING.md\nindex fcc7d9b1c..d0e142b16 100644\n--- a/CONTRIBUTING.md\n+++ b/CONTRIBUTING.md\n@@ -130,6 +130,9 @@ Follow these steps to start contributing:\n it with `pip uninstall accelerate` before reinstalling it in editable\n mode with the `-e` flag.)\n \n+ Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using\n+ the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers).\n+\n 5. Develop the features on your branch.\n \n As you work on the features, you should make sure that the test suite\n", "code_comments": [ { "body": "I think we should enable CPU by default and have the user work on the container to activate GPU.", "diff_hunk": "@@ -0,0 +1,25 @@\n+// File only needed for VSCode users to have proper Docker based interpreters\n+{\n+ \"name\": \"accelerate_dev_environment\",\n+ \"build\": {\n+ // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment\n+ // \"dockerfile\": \"../docker/accelerate-cpu/Dockerfile\"", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -0,0 +1,25 @@\n+// File only needed for VSCode users to have proper Docker based interpreters\n+{\n+ \"name\": \"accelerate_dev_environment\",\n+ \"build\": {\n+ // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment\n+ // \"dockerfile\": \"../docker/accelerate-cpu/Dockerfile\"\n+ \"dockerfile\": \"../docker/accelerate-gpu/Dockerfile\"\n+ },\n+ \"runArgs\": [\n+ // ACTION NEEDED: comment the next line if your local machine has no GPUs available", "from_author": false }, { "body": "Sure, I was torn on this, so am happy to go the other way.", "diff_hunk": "@@ -0,0 +1,25 @@\n+// File only needed for VSCode users to have proper Docker based interpreters\n+{\n+ \"name\": \"accelerate_dev_environment\",\n+ \"build\": {\n+ // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment\n+ // \"dockerfile\": \"../docker/accelerate-cpu/Dockerfile\"", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "> Also, if the users change the devcontainer.json for their local use, won't those changes be pushed to GitHub automatically?\r\n\r\nUnfortunately, VSCode doesn't (yet) support multiple dev container configurations to my knowledge, so we have to rely on people not committing dev container changes.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/782", "pr_id": 1094235843 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex bf17f57ce..81759962c 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -153,6 +153,10 @@ class Accelerator:\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\n and then the batches are split and broadcast to each process. 
Will default to `True` for `DataLoader` whose\n underlying dataset is an `IterableDataset`, `False` otherwise.\n+ even_batches (`bool`, *optional*, defaults to `True`):\n+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the\n+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\n+ all workers.\n step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`):\n Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only\n done under certain circumstances (at the end of each epoch, for instance).\n@@ -191,6 +195,7 @@ def __init__(\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = None,\n dispatch_batches: Optional[bool] = None,\n+ even_batches: bool = True,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n@@ -305,6 +310,7 @@ def __init__(\n raise ImportError(\n \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\n )\n+ self.even_batches = even_batches\n self.step_scheduler_with_optimizer = step_scheduler_with_optimizer\n \n # Mixed precision attributes\n@@ -1109,6 +1115,7 @@ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_p\n put_on_device=device_placement,\n rng_types=self.rng_types.copy(),\n dispatch_batches=self.dispatch_batches,\n+ even_batches=self.even_batches,\n )\n \n def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex cdae54af9..39f843430 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -118,7 +118,8 @@ class BatchSamplerShard(BatchSampler):\n \n <Tip warning={true}>\n \n- This does not support `BatchSampler` with varying batch size yet.\n+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`\n+ equal to `False`\n \n </Tip>\"\"\"\n \n@@ -570,6 +571,7 @@ def prepare_data_loader(\n put_on_device: bool = False,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n dispatch_batches: Optional[bool] = None,\n+ even_batches: bool = True,\n ) -> DataLoader:\n \"\"\"\n Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.\n@@ -615,15 +617,21 @@ def prepare_data_loader(\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\n are split and broadcast to each process. Will default to `True` when the underlying dataset is an\n `IterableDataset`, `False` otherwise.\n+ even_batches (`bool`, *optional*, defaults to `True`):\n+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the\n+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\n+ all workers.\n \n Returns:\n `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n \n <Tip warning={true}>\n \n- This does not support `BatchSampler` with varying batch size yet.\n+ `BatchSampler`s with varying batch sizes are not enabled by default. 
To enable this behaviour, set `even_batches`\n+ equal to `False`\n \n- </Tip>\"\"\"\n+ </Tip>\n+ \"\"\"\n if dispatch_batches is None:\n if is_torch_version(\"<\", \"1.8.0\") or not put_on_device:\n dispatch_batches = False\n@@ -683,6 +691,7 @@ def prepare_data_loader(\n num_processes=num_processes,\n process_index=process_index,\n split_batches=split_batches,\n+ even_batches=even_batches,\n )\n \n # We ignore all of those since they are all dealt with by our new_batch_sampler\ndiff --git a/src/accelerate/test_utils/scripts/test_distributed_data_loop.py b/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\nnew file mode 100644\nindex 000000000..eaf7c6a34\n--- /dev/null\n+++ b/src/accelerate/test_utils/scripts/test_distributed_data_loop.py\n@@ -0,0 +1,113 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from typing import List\n+\n+import torch\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate.accelerator import Accelerator\n+\n+\n+def create_accelerator(even_batches=True):\n+ accelerator = Accelerator(even_batches=even_batches)\n+ assert accelerator.num_processes == 2, \"this script expects that two GPUs are available\"\n+ return accelerator\n+\n+\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\n+ \"\"\"\n+ Create a simple DataLoader to use during the test cases\n+ \"\"\"\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\n+\n+ dl = DataLoader(dataset, batch_size=batch_size)\n+ dl = accelerator.prepare(dl)\n+\n+ return dl\n+\n+\n+def verify_dataloader_batch_sizes(\n+ accelerator: Accelerator,\n+ dataset_size: int,\n+ batch_size: int,\n+ process_0_expected_batch_sizes: List[int],\n+ process_1_expected_batch_sizes: List[int],\n+):\n+ \"\"\"\n+ A helper function for verifying the batch sizes coming from a prepared dataloader in each process\n+ \"\"\"\n+ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)\n+\n+ batch_sizes = [len(batch[0]) for batch in dl]\n+\n+ if accelerator.process_index == 0:\n+ assert batch_sizes == process_0_expected_batch_sizes\n+ elif accelerator.process_index == 1:\n+ assert batch_sizes == process_1_expected_batch_sizes\n+\n+\n+def test_default_ensures_even_batch_sizes():\n+\n+ accelerator = create_accelerator()\n+\n+ # without padding, we would expect a different number of batches\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=3,\n+ batch_size=1,\n+ process_0_expected_batch_sizes=[1, 1],\n+ process_1_expected_batch_sizes=[1, 1],\n+ )\n+\n+ # without padding, we would expect the same number of batches, but different sizes\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=7,\n+ batch_size=2,\n+ process_0_expected_batch_sizes=[2, 2],\n+ process_1_expected_batch_sizes=[2, 2],\n+ )\n+\n+\n+def test_can_disable_even_batches():\n+ accelerator = create_accelerator(even_batches=False)\n+\n+ 
verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=3,\n+ batch_size=1,\n+ process_0_expected_batch_sizes=[1, 1],\n+ process_1_expected_batch_sizes=[1],\n+ )\n+\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=7,\n+ batch_size=2,\n+ process_0_expected_batch_sizes=[2, 2],\n+ process_1_expected_batch_sizes=[2, 1],\n+ )\n+\n+\n+if __name__ == \"__main__\":\n+ accelerator = create_accelerator()\n+\n+ accelerator.print(\"Test that even_batches variable ensures uniform batches across processes\")\n+ test_default_ensures_even_batch_sizes()\n+\n+ accelerator.print(\"Run tests with even_batches disabled\")\n+ test_can_disable_even_batches()\ndiff --git a/tests/test_multigpu.py b/tests/test_multigpu.py\nindex 2c0403ed5..54a5bca75 100644\n--- a/tests/test_multigpu.py\n+++ b/tests/test_multigpu.py\n@@ -28,6 +28,9 @@ class MultiGPUTester(unittest.TestCase):\n def setUp(self):\n mod_file = inspect.getfile(accelerate.test_utils)\n self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_script.py\"])\n+ self.data_loop_file_path = os.path.sep.join(\n+ mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_distributed_data_loop.py\"]\n+ )\n \n @require_multi_gpu\n def test_multi_gpu(self):\n@@ -42,6 +45,17 @@ def test_pad_across_processes(self):\n with patch_environment(omp_num_threads=1):\n execute_subprocess_async(cmd, env=os.environ.copy())\n \n+ @require_multi_gpu\n+ def test_distributed_data_loop(self):\n+ \"\"\"\n+ This TestCase checks the behaviour that occurs during distributed training or evaluation,\n+ when the batch size does not evenly divide the dataset size.\n+ \"\"\"\n+ print(f\"Found {torch.cuda.device_count()} devices, using 2 devices only\")\n+ cmd = get_launch_prefix() + [f\"--nproc_per_node={torch.cuda.device_count()}\", self.data_loop_file_path]\n+ with patch_environment(omp_num_threads=1, cuda_visible_devices=\"0,1\"):\n+ execute_subprocess_async(cmd, env=os.environ.copy())\n+\n \n if __name__ == \"__main__\":\n accelerator = Accelerator()\n", "code_comments": [ { "body": "```suggestion\r\n even_batches (`bool`, *optional*, defaults to `True`):\r\n```", "diff_hunk": "@@ -153,6 +153,10 @@ class Accelerator:\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\n and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose\n underlying dataset is an `IterableDataset`, `False` otherwise.\n+ even_batches ('bool', defaults to `True`):", "from_author": false }, { "body": "```suggestion\r\n even_batches (`bool`, *optional*, defaults to `True`):\r\n```", "diff_hunk": "@@ -615,15 +616,15 @@ def prepare_data_loader(\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\n are split and broadcast to each process. Will default to `True` when the underlying dataset is an\n `IterableDataset`, `False` otherwise.\n+ even_batches ('bool', defaults to `True`):", "from_author": false }, { "body": "You should leave the warning but rephrase to state a `BatchSampler` with varying batch size will require `even_batches=False`.", "diff_hunk": "@@ -615,15 +616,15 @@ def prepare_data_loader(\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\n are split and broadcast to each process. 
Will default to `True` when the underlying dataset is an\n `IterableDataset`, `False` otherwise.\n+ even_batches ('bool', defaults to `True`):\n+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the\n+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\n+ all workers.\n \n Returns:\n `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n \n- <Tip warning={true}>\n-\n- This does not support `BatchSampler` with varying batch size yet.", "from_author": false }, { "body": "Sure, I can update that", "diff_hunk": "@@ -615,15 +616,15 @@ def prepare_data_loader(\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\n are split and broadcast to each process. Will default to `True` when the underlying dataset is an\n `IterableDataset`, `False` otherwise.\n+ even_batches ('bool', defaults to `True`):\n+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the\n+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\n+ all workers.\n \n Returns:\n `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n \n- <Tip warning={true}>\n-\n- This does not support `BatchSampler` with varying batch size yet.", "from_author": true }, { "body": "You're currently not testing anything at all, it's never being ran. Please see here to learn how: https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/scripts/test_sync.py#L229-L274\r\n\r\nI also highly recommend using print statements so we can see just what is being tested at a given moment, and we know if they've run πŸ˜„ ", "diff_hunk": "@@ -0,0 +1,103 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from typing import List\n+\n+import torch\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate.accelerator import Accelerator\n+\n+\n+def create_accelerator(even_batches=True):\n+ accelerator = Accelerator(even_batches=even_batches)\n+ assert accelerator.num_processes == 2, \"this script expects that two GPUs are available\"\n+ return accelerator\n+\n+\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\n+ \"\"\"\n+ Create a simple DataLoader to use during the test cases\n+ \"\"\"\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\n+\n+ dl = DataLoader(dataset, batch_size=batch_size)\n+ dl = accelerator.prepare(dl)\n+\n+ return dl\n+\n+\n+def verify_dataloader_batch_sizes(\n+ accelerator: Accelerator,\n+ dataset_size: int,\n+ batch_size: int,\n+ process_0_expected_batch_sizes: List[int],\n+ process_1_expected_batch_sizes: List[int],\n+):\n+ \"\"\"\n+ A helper function for verifying the batch sizes coming from a prepared dataloader in each process\n+ \"\"\"\n+ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)\n+\n+ batch_sizes = [len(batch[0]) for batch in dl]\n+\n+ if accelerator.process_index == 0:\n+ assert batch_sizes == process_0_expected_batch_sizes\n+ elif accelerator.process_index == 1:\n+ assert batch_sizes == process_1_expected_batch_sizes\n+\n+\n+def test_default_ensures_even_batch_sizes():\n+\n+ accelerator = create_accelerator()\n+\n+ # without padding, we would expect a different number of batches\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=3,\n+ batch_size=1,\n+ process_0_expected_batch_sizes=[1, 1],\n+ process_1_expected_batch_sizes=[1, 1],\n+ )\n+\n+ # without padding, we would expect the same number of batches, but different sizes\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=7,\n+ batch_size=2,\n+ process_0_expected_batch_sizes=[2, 2],\n+ process_1_expected_batch_sizes=[2, 2],\n+ )\n+\n+\n+def test_can_disable_even_batches():", "from_author": false }, { "body": "Gah! I think I misinterpreted what you meant when you said PyTest format. Looking at the other example, it looks like it still needs a main method to drive running the tests; just what I removed a few commits back :satisfied:! I'll reinsert the main method and some print statements.\r\n\r\nThat said, when I run it with PyTest locally, using `CUDA_VISIBLE_DEVICES=\"0,1\" pytest -sv tests/test_multigpu.py` it does seem to pick them up.", "diff_hunk": "@@ -0,0 +1,103 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from typing import List\n+\n+import torch\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate.accelerator import Accelerator\n+\n+\n+def create_accelerator(even_batches=True):\n+ accelerator = Accelerator(even_batches=even_batches)\n+ assert accelerator.num_processes == 2, \"this script expects that two GPUs are available\"\n+ return accelerator\n+\n+\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\n+ \"\"\"\n+ Create a simple DataLoader to use during the test cases\n+ \"\"\"\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\n+\n+ dl = DataLoader(dataset, batch_size=batch_size)\n+ dl = accelerator.prepare(dl)\n+\n+ return dl\n+\n+\n+def verify_dataloader_batch_sizes(\n+ accelerator: Accelerator,\n+ dataset_size: int,\n+ batch_size: int,\n+ process_0_expected_batch_sizes: List[int],\n+ process_1_expected_batch_sizes: List[int],\n+):\n+ \"\"\"\n+ A helper function for verifying the batch sizes coming from a prepared dataloader in each process\n+ \"\"\"\n+ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)\n+\n+ batch_sizes = [len(batch[0]) for batch in dl]\n+\n+ if accelerator.process_index == 0:\n+ assert batch_sizes == process_0_expected_batch_sizes\n+ elif accelerator.process_index == 1:\n+ assert batch_sizes == process_1_expected_batch_sizes\n+\n+\n+def test_default_ensures_even_batch_sizes():\n+\n+ accelerator = create_accelerator()\n+\n+ # without padding, we would expect a different number of batches\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=3,\n+ batch_size=1,\n+ process_0_expected_batch_sizes=[1, 1],\n+ process_1_expected_batch_sizes=[1, 1],\n+ )\n+\n+ # without padding, we would expect the same number of batches, but different sizes\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=7,\n+ batch_size=2,\n+ process_0_expected_batch_sizes=[2, 2],\n+ process_1_expected_batch_sizes=[2, 2],\n+ )\n+\n+\n+def test_can_disable_even_batches():", "from_author": true }, { "body": "I've just pushed some changes that hopefully resolve this. Just FYI, it is getting pretty late here in the UK, and I am heading off on vacation tomorrow until the 31st. If there are other changes required, and you would like this merged before I'm back, please feel free to update the branch with any further changes!", "diff_hunk": "@@ -0,0 +1,103 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from typing import List\n+\n+import torch\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate.accelerator import Accelerator\n+\n+\n+def create_accelerator(even_batches=True):\n+ accelerator = Accelerator(even_batches=even_batches)\n+ assert accelerator.num_processes == 2, \"this script expects that two GPUs are available\"\n+ return accelerator\n+\n+\n+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):\n+ \"\"\"\n+ Create a simple DataLoader to use during the test cases\n+ \"\"\"\n+ dataset = TensorDataset(torch.as_tensor(range(dataset_size)))\n+\n+ dl = DataLoader(dataset, batch_size=batch_size)\n+ dl = accelerator.prepare(dl)\n+\n+ return dl\n+\n+\n+def verify_dataloader_batch_sizes(\n+ accelerator: Accelerator,\n+ dataset_size: int,\n+ batch_size: int,\n+ process_0_expected_batch_sizes: List[int],\n+ process_1_expected_batch_sizes: List[int],\n+):\n+ \"\"\"\n+ A helper function for verifying the batch sizes coming from a prepared dataloader in each process\n+ \"\"\"\n+ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)\n+\n+ batch_sizes = [len(batch[0]) for batch in dl]\n+\n+ if accelerator.process_index == 0:\n+ assert batch_sizes == process_0_expected_batch_sizes\n+ elif accelerator.process_index == 1:\n+ assert batch_sizes == process_1_expected_batch_sizes\n+\n+\n+def test_default_ensures_even_batch_sizes():\n+\n+ accelerator = create_accelerator()\n+\n+ # without padding, we would expect a different number of batches\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=3,\n+ batch_size=1,\n+ process_0_expected_batch_sizes=[1, 1],\n+ process_1_expected_batch_sizes=[1, 1],\n+ )\n+\n+ # without padding, we would expect the same number of batches, but different sizes\n+ verify_dataloader_batch_sizes(\n+ accelerator,\n+ dataset_size=7,\n+ batch_size=2,\n+ process_0_expected_batch_sizes=[2, 2],\n+ process_1_expected_batch_sizes=[2, 2],\n+ )\n+\n+\n+def test_can_disable_even_batches():", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Hi, I have made a start on the work discussed in https://github.com/huggingface/accelerate/issues/684, and added an argument to the accelerator. \r\n\r\nI am slightly unsure about your testing strategy though, and which tests you require for this; I couldn't seem to find any integration tests for accelerator arguments. So far, I have tried to follow similar ideas to those seen in the end-to-end multi-GPU tests. 
\r\n\r\nI am slightly confused by exactly how the Multi-GPU tests are triggered though; whilst I see that there is a decorator to skip certain tests when multiple GPUs are not available, when I try to run this locally (on a machine with 2 GPUs), those tests only seem to execute on a single process.\r\n\r\nAny guidance here would be appreciated! cc: @muellerzr @sgugger ", "from_author": true }, { "body": "Hey @Chris-hughes10! Thanks!\r\n\r\nYou're absolutely right, I should probably write this all up in a contribution doc and will add it to my list of tasks!\r\n\r\nAccelerates tests are setup in a few ways, but the key idea to remember is:\r\n\r\nIf it's something to be tested across multiple devices/workers, such as CPU, TPU, and mutli-GPU (such as your tests you want to add), these all get thrown into a script located in `accelerate/test_utils/scripts`\r\n\r\nFor example, the major integration script that checks the dataloaders operate properly is located at `accelerate/test_utils/scripts/test_script.py` (I wonder if we should section this out a bit more @sgugger since we now have a whole folder dedicated to it and we can clean it up some), which you can find here: https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/scripts/test_script.py\r\n\r\nThese tests are setup like how a `pytest` test would be, where you have `def some_test` and `assert` statements to check if things are working properly. \r\n\r\nFrom here your script should be added as a test case to `tests/test.py`, `tests/test_cpu.py` (if applicable for multi-cpu), `tests/test_multigpu.py`, and `tests/test_tpu.py`.\r\n\r\nIt has to be done this way because each platform has different launching configurations :) \r\n\r\nFinally, when running pytest you need to set the cuda visible devices for it to be able to pick up on the multiple GPUs, such as:\r\n\r\n```bash\r\nCUDA_VISIBLE_DEVICES=\"0,1\" pytest -sv tests/test_multigpu.py\r\n```\r\nOr:\r\n```bash\r\nCUDA_VISIBLE_DEVICES=\"\" pytest -sv tests/test_multigpu.py\r\n```\r\n(the latter will have all their tests skipped). \r\n\r\nAll skipping decorator conditionals can be found here: https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/testing.py\r\n\r\nLet me know if there are any other questions you have on this @Chris-hughes10, afterwards I can throw this all up into some documentation :) ", "from_author": false }, { "body": "Hey @muellerzr, Thanks for the info!\r\n\r\nSo, it looks like I was on the right track regarding the structure, but I was running with unittests and not Pytest. I have refactored it to pytest, and it seems to work alright using `CUDA_VISIBLE_DEVICES=\"0,1\" pytest -sv tests/test_multigpu.py, so it is probably ready for an initial review. \r\n\r\nI noticed that the main integration script seems to be doing a lot, so I created a new script to be super clear on what it is testing. The tests for the context manager and joining can probably live here too when implemented.\r\n\r\nAs an aside, how do you guys set up your development environment? 
I have set up a dev container, using the accelerate-gpu image, if it is useful, I could push that in another PR.", "from_author": true }, { "body": "@Chris-hughes10 I use conda for mine, and the CI's all run based off the docker images as there's hosted computes we use", "from_author": false }, { "body": "> @Chris-hughes10 I use conda for mine, and the CI's all run based off the conda images as there's hosted computes we use\r\n\r\nOk, if you are using conda envs, I think that this could be useful. I'll create a small PR and you guys can see what you think.", "from_author": true }, { "body": "Hey @muellerzr, are there any further changes required on this?", "from_author": true }, { "body": "@Chris-hughes10 close, your tests still weren't passing, and this is because `nproc_per_node` needed to be specified in the test function. (Discussing with Sylvain if this should be *everywhere* or not) but basically without that `Accelerator.num_processes` was only ever equal to one. \r\n\r\nWent ahead and pushed the fix :)", "from_author": false }, { "body": "Thanks for all your work on this!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/781", "pr_id": 1094096724 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex a9e955d43..52e8ecfb6 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -95,10 +95,13 @@ The following arguments are useful for customization of worker machines\n * `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\n * `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.\n * `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list\n+* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.\n * `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.\n * `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.\n+* `--rdzv_conf` (`str`) -- Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).\n * `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. 
Can be tuned for optimal performance.\n-\n+* `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.\n+* `--monitor_interval` (`float`) -- Interval, in seconds, to monitor the state of workers.\n \n **DeepSpeed Arguments**:\n \n@@ -125,8 +128,9 @@ The following arguments are only useful when `use_fdsp` is passed:\n **TPU Arguments**:\n \n The following arguments are only useful when `tpu` is passed:\n-* `--tpu` (`bool`) - Whether or not this should launch a TPU training.\n+* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.\n * `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.\n+* `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\n \n **AWS SageMaker Arguments**:\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 2198bb3f1..b9f6c1089 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -325,6 +325,12 @@ def launch_command_parser(subparsers=None):\n default=None,\n help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\n )\n+ parser.add_argument(\n+ \"--same_network\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether all machines used for multinode training exist on the same local network.\",\n+ )\n parser.add_argument(\n \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\n )\n@@ -363,7 +369,7 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--downcast_bf16\",\n action=\"store_true\",\n- help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32\",\n+ help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\",\n )\n parser.add_argument(\n \"-m\",\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/780", "pr_id": 1092658402 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex fa7588b0a..29e1b7bad 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -573,7 +573,7 @@ def multi_gpu_launcher(args):\n try:\n distrib_run.run(args)\n except:\n- if debug:\n+ if is_rich_available() and debug:\n console = get_console()\n console.print(\"\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\n console.print_exception(suppress=[__file__], show_locals=False)\n@@ -684,7 +684,7 @@ def deepspeed_launcher(args):\n try:\n distrib_run.run(args)\n except:\n- if debug:\n+ if is_rich_available() and debug:\n console = get_console()\n console.print(\"\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\n console.print_exception(suppress=[__file__], show_locals=False)\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 82ea288f0..00ba3ab67 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -14,11 +14,19 @@\n \n import os\n import warnings\n-from distutils.util import strtobool\n \n import torch\n \n-from .utils import DistributedType, get_ccl_version, 
is_ccl_available, is_deepspeed_available, is_tpu_available\n+from .utils import (\n+ DistributedType,\n+ get_ccl_version,\n+ get_int_from_env,\n+ is_ccl_available,\n+ is_deepspeed_available,\n+ is_tpu_available,\n+ parse_choice_from_env,\n+ parse_flag_from_env,\n+)\n from .utils.dataclasses import SageMakerDistributedType\n \n \n@@ -26,25 +34,6 @@\n import torch_xla.core.xla_model as xm\n \n \n-def get_int_from_env(env_keys, default):\n- \"\"\"Returns the first positive env value found in the `env_keys` list or the default.\"\"\"\n- for e in env_keys:\n- val = int(os.environ.get(e, -1))\n- if val >= 0:\n- return val\n- return default\n-\n-\n-def parse_flag_from_env(key, default=False):\n- value = os.environ.get(key, str(default))\n- return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int...\n-\n-\n-def parse_choice_from_env(key, default=\"no\"):\n- value = os.environ.get(key, str(default))\n- return value\n-\n-\n # Inspired by Alex Martelli's 'Borg'.\n class AcceleratorState:\n \"\"\"\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex b472ec556..b22518f9a 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -19,6 +19,7 @@\n SageMakerDistributedType,\n TensorInformation,\n )\n+from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env\n from .imports import (\n get_ccl_version,\n is_aim_available,\ndiff --git a/src/accelerate/utils/environment.py b/src/accelerate/utils/environment.py\nnew file mode 100644\nindex 000000000..9247bb4fe\n--- /dev/null\n+++ b/src/accelerate/utils/environment.py\n@@ -0,0 +1,36 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from distutils.util import strtobool\n+\n+\n+def get_int_from_env(env_keys, default):\n+ \"\"\"Returns the first positive env value found in the `env_keys` list or the default.\"\"\"\n+ for e in env_keys:\n+ val = int(os.environ.get(e, -1))\n+ if val >= 0:\n+ return val\n+ return default\n+\n+\n+def parse_flag_from_env(key, default=False):\n+ \"\"\"Returns truthy value for `key` from the env if available else the default.\"\"\"\n+ value = os.environ.get(key, str(default))\n+ return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int...\n+\n+\n+def parse_choice_from_env(key, default=\"no\"):\n+ value = os.environ.get(key, str(default))\n+ return value\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex bdb53988a..c33258a92 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -20,6 +20,7 @@\n \n from packaging.version import parse\n \n+from .environment import parse_flag_from_env\n from .versions import compare_versions, is_torch_version\n \n \n@@ -126,7 +127,7 @@ def is_boto3_available():\n \n \n def is_rich_available():\n- return importlib.util.find_spec(\"rich\") is not None\n+ return 
(importlib.util.find_spec(\"rich\") is not None) and (not parse_flag_from_env(\"DISABLE_RICH\"))\n \n \n def is_sagemaker_available():\n", "code_comments": [ { "body": "Let's accept anything that's truthy here? Like yes/no, true/false, 0/1", "diff_hunk": "@@ -126,7 +127,7 @@ def is_boto3_available():\n \n \n def is_rich_available():\n- return importlib.util.find_spec(\"rich\") is not None\n+ return (importlib.util.find_spec(\"rich\") is not None) and (not os.environ.get(\"DISABLE_RICH\", \"0\") == \"1\")", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/779", "pr_id": 1092437199 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex fa7588b0a..071c86a1d 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -470,7 +470,7 @@ def multi_gpu_launcher(args):\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\n setattr(args, \"nnodes\", str(num_machines))\n setattr(args, \"node_rank\", int(args.machine_rank))\n- if getattr(args, \"same_network\"):\n+ if getattr(args, \"same_network\", False):\n setattr(args, \"master_addr\", str(main_process_ip))\n setattr(args, \"master_port\", str(main_process_port))\n else:\n@@ -488,7 +488,7 @@ def multi_gpu_launcher(args):\n setattr(args, \"no_python\", True)\n \n current_env = os.environ.copy()\n- gpu_ids = getattr(args, \"gpu_ids\")\n+ gpu_ids = getattr(args, \"gpu_ids\", \"all\")\n if gpu_ids != \"all\" and args.gpu_ids is not None:\n current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\n mixed_precision = args.mixed_precision.lower()\n@@ -618,7 +618,7 @@ def deepspeed_launcher(args):\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\n setattr(args, \"nnodes\", str(num_machines))\n setattr(args, \"node_rank\", int(args.machine_rank))\n- if getattr(args, \"same_network\"):\n+ if getattr(args, \"same_network\", False):\n setattr(args, \"master_addr\", str(main_process_ip))\n setattr(args, \"master_port\", str(main_process_port))\n else:\n@@ -636,7 +636,7 @@ def deepspeed_launcher(args):\n setattr(args, \"no_python\", True)\n \n current_env = os.environ.copy()\n- gpu_ids = getattr(args, \"gpu_ids\")\n+ gpu_ids = getattr(args, \"gpu_ids\", \"all\")\n if gpu_ids != \"all\" and args.gpu_ids is not None:\n current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\n try:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/778", "pr_id": 1092392637 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 1760e72bc..c14833557 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -408,8 +408,9 @@ def get_balanced_memory(\n per_gpu += buffer\n \n max_memory = get_max_memory(max_memory)\n+ last_gpu = max(i for i in max_memory if isinstance(i, int) and max_memory[i] > 0)\n # The last device is left with max_memory just in case the buffer is not enough.\n- for i in range(len(max_memory) - 1):\n+ for i in range(last_gpu):\n max_memory[i] = min(0 if low_zero and i == 0 else per_gpu, max_memory[i])\n \n if low_zero:\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\nindex 02ab867d7..16243d5e1 100644\n--- 
a/tests/test_modeling_utils.py\n+++ b/tests/test_modeling_utils.py\n@@ -376,6 +376,10 @@ def test_get_balanced_memory(self):\n         max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500})\n         self.assertDictEqual({0: 215, 1: 500}, max_memory)\n \n+        # Last device always get max memory to give more buffer, even if CPU is provided\n+        max_memory = get_balanced_memory(model, max_memory={0: 300, \"cpu\": 1000})\n+        self.assertDictEqual({0: 300, \"cpu\": 1000}, max_memory)\n+\n         # If we set a device to 0, it's not counted.\n         max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300})\n         self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory)\n", "code_comments": [ { "body": "```suggestion\r\n    offset = 2 if \"cpu\" in (list(max_memory.keys())) else 1\r\n    for i in range(len(max_memory) - offset):\r\n```\r\nI think this way it seems that the slow tests of `accelerate` are not broken and we also keep the `transformers` tests pass! Can you confirm this pass the `transformers` tests that were broken @ArthurZucker ? πŸ™ ", "diff_hunk": "@@ -409,7 +409,7 @@ def get_balanced_memory(\n \n     max_memory = get_max_memory(max_memory)\n     # The last device is left with max_memory just in case the buffer is not enough.\n-    for i in range(len(max_memory) - 1):\n+    for i in range(num_devices - 1):", "from_author": false }, { "body": "Will have a look πŸ˜„ I am also adding the following \r\n```python\r\nmax_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500, \"cpu\":1000})\r\n```", "diff_hunk": "@@ -409,7 +409,7 @@ def get_balanced_memory(\n \n     max_memory = get_max_memory(max_memory)\n     # The last device is left with max_memory just in case the buffer is not enough.\n-    for i in range(len(max_memory) - 1):\n+    for i in range(num_devices - 1):", "from_author": true }, { "body": "Ahhh great! I was missing this test! Thanks for clarifying πŸ’ͺ ", "diff_hunk": "@@ -409,7 +409,7 @@ def get_balanced_memory(\n \n     max_memory = get_max_memory(max_memory)\n     # The last device is left with max_memory just in case the buffer is not enough.\n-    for i in range(len(max_memory) - 1):\n+    for i in range(num_devices - 1):", "from_author": false }, { "body": "This won't work when the last device is GPU 3 but the user forced 0 at GPU 0, since `num_devices` will be 3 (instead of 4) and thus the max memory will be at GPU 2 instead of 3. 
(you can try running the multi-GPU tests and they will fail).\r\n\r\nAgreed that `len(max_memory)` is not good either, so we should just compute the biggest integer in the keys of `max_memory`:\r\n```py\r\nlast_gpu = max(i for i in max_memory if isinstance(i, int))\r\n```\r\nthen use this.\r\n\r\n", "diff_hunk": "@@ -409,7 +409,7 @@ def get_balanced_memory(\n \n     max_memory = get_max_memory(max_memory)\n     # The last device is left with max_memory just in case the buffer is not enough.\n-    for i in range(len(max_memory) - 1):\n+    for i in range(num_devices - 1):", "from_author": false }, { "body": "Okay will add this suggestion πŸ’― ", "diff_hunk": "@@ -409,7 +409,7 @@ def get_balanced_memory(\n \n     max_memory = get_max_memory(max_memory)\n     # The last device is left with max_memory just in case the buffer is not enough.\n-    for i in range(len(max_memory) - 1):\n+    for i in range(num_devices - 1):", "from_author": true }, { "body": "Oh and we should avoid a GPU that the user set at 0, so actually\r\n```\r\nlast_gpu = max(i for i in max_memory if isinstance(i, int) and max_memory[i] > 0)\r\n```", "diff_hunk": "@@ -409,7 +409,7 @@ def get_balanced_memory(\n \n     max_memory = get_max_memory(max_memory)\n     # The last device is left with max_memory just in case the buffer is not enough.\n-    for i in range(len(max_memory) - 1):\n+    for i in range(num_devices - 1):", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/777", "pr_id": 1092225159 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex c4a015156..cdae54af9 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -112,6 +112,9 @@ class BatchSamplerShard(BatchSampler):\n               this argument is set to `False`.\n             - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`\n               then `[6, 7]` if this argument is set to `True`.\n+        even_batches (`bool`, *optional*, defaults to `True`):\n+            Whether or not to loop back at the beginning of the sampler when the number of samples is not a round\n+            multiple of (original batch size / number of processes).\n \n     <Tip warning={true}>\n \n@@ -125,6 +128,7 @@ def __init__(\n         num_processes: int = 1,\n         process_index: int = 0,\n         split_batches: bool = False,\n+        even_batches: bool = True,\n     ):\n         if split_batches and batch_sampler.batch_size % num_processes != 0:\n             raise ValueError(\n@@ -135,8 +139,11 @@ def __init__(\n         self.num_processes = num_processes\n         self.process_index = process_index\n         self.split_batches = split_batches\n-        self.batch_size = batch_sampler.batch_size\n-        self.drop_last = batch_sampler.drop_last\n+        self.even_batches = even_batches\n+        self.batch_size = getattr(batch_sampler, \"batch_size\", None)\n+        self.drop_last = getattr(batch_sampler, \"drop_last\", False)\n+        if self.batch_size is None and self.even_batches:\n+            raise ValueError(\"You need to use `even_batches=False` when the batch sampler has no batch size.\")\n \n     @property\n     def total_length(self):\n@@ -144,11 +151,21 @@ def total_length(self):\n \n     def __len__(self):\n         if self.split_batches:\n+            # Split batches does not change the length of the batch sampler\n             return len(self.batch_sampler)\n         if len(self.batch_sampler) % self.num_processes == 0:\n+            # If the length is a round multiple of the number of processes, it's easy.\n             return len(self.batch_sampler) // self.num_processes\n 
length = len(self.batch_sampler) // self.num_processes\n- return length if self.drop_last else length + 1\n+ if self.drop_last:\n+ # Same if we drop the remainder.\n+ return length\n+ elif self.even_batches:\n+ # When we even batches we always get +1\n+ return length + 1\n+ else:\n+ # Otherwise it depends on the process index.\n+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length\n \n def __iter__(self):\n return self._iter_with_split() if self.split_batches else self._iter_with_no_split()\n@@ -165,11 +182,15 @@ def _iter_with_split(self):\n \n # If drop_last is True of the last batch was full, iteration is over, otherwise...\n if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:\n- # For degenerate cases where the dataset has less than num_process * batch_size samples\n- while len(initial_data) < self.batch_size:\n- initial_data += initial_data\n- batch = batch + initial_data\n- yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\n+ if not self.even_batches:\n+ if len(batch) > batch_length * self.process_index:\n+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\n+ else:\n+ # For degenerate cases where the dataset has less than num_process * batch_size samples\n+ while len(initial_data) < self.batch_size:\n+ initial_data += initial_data\n+ batch = batch + initial_data\n+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\n \n def _iter_with_no_split(self):\n initial_data = []\n@@ -182,35 +203,41 @@ def _iter_with_no_split(self):\n # yielding it.\n if idx % self.num_processes == self.process_index:\n batch_to_yield = batch\n- if idx % self.num_processes == self.num_processes - 1 and len(batch) == self.batch_size:\n+ if idx % self.num_processes == self.num_processes - 1 and (\n+ self.batch_size is None or len(batch) == self.batch_size\n+ ):\n yield batch_to_yield\n batch_to_yield = []\n \n # If drop_last is True, iteration is over, otherwise...\n if not self.drop_last and len(initial_data) > 0:\n- # ... we yield the complete batch we had saved before if it has the proper length\n- if len(batch_to_yield) == self.batch_size:\n- yield batch_to_yield\n-\n- # For degenerate cases where the dataset has less than num_process * batch_size samples\n- while len(initial_data) < self.num_processes * self.batch_size:\n- initial_data += initial_data\n-\n- # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\n- if len(batch) == self.batch_size:\n- batch = []\n- idx += 1\n-\n- # Make sure we yield a multiple of self.num_processes batches\n- cycle_index = 0\n- while idx % self.num_processes != 0 or len(batch) > 0:\n- end_index = cycle_index + self.batch_size - len(batch)\n- batch += initial_data[cycle_index:end_index]\n- if idx % self.num_processes == self.process_index:\n- yield batch\n- cycle_index = end_index\n- batch = []\n- idx += 1\n+ if not self.even_batches:\n+ if len(batch_to_yield) > 0:\n+ yield batch_to_yield\n+ else:\n+ # ... 
we yield the complete batch we had saved before if it has the proper length\n+ if len(batch_to_yield) == self.batch_size:\n+ yield batch_to_yield\n+\n+ # For degenerate cases where the dataset has less than num_process * batch_size samples\n+ while len(initial_data) < self.num_processes * self.batch_size:\n+ initial_data += initial_data\n+\n+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\n+ if len(batch) == self.batch_size:\n+ batch = []\n+ idx += 1\n+\n+ # Make sure we yield a multiple of self.num_processes batches\n+ cycle_index = 0\n+ while idx % self.num_processes != 0 or len(batch) > 0:\n+ end_index = cycle_index + self.batch_size - len(batch)\n+ batch += initial_data[cycle_index:end_index]\n+ if idx % self.num_processes == self.process_index:\n+ yield batch\n+ cycle_index = end_index\n+ batch = []\n+ idx += 1\n \n \n class IterableDatasetShard(IterableDataset):\ndiff --git a/tests/test_data_loader.py b/tests/test_data_loader.py\nindex 2d3b6a394..a18dae732 100644\n--- a/tests/test_data_loader.py\n+++ b/tests/test_data_loader.py\n@@ -36,8 +36,11 @@ def __iter__(self):\n \n \n class DataLoaderTester(unittest.TestCase):\n- def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False):\n- batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, split_batches) for i in range(2)]\n+ def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):\n+ batch_sampler_shards = [\n+ BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)\n+ for i in range(2)\n+ ]\n batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]\n if not split_batches:\n self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])\n@@ -164,6 +167,137 @@ def test_batch_sampler_shards_with_splits(self):\n expected = [[], []]\n self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)\n \n+ def test_batch_sampler_shards_with_no_splits_no_even(self):\n+ # Check the shards when the dataset is a round multiple of total batch size.\n+ batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)\n+ expected = [\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)\n+ # Expected shouldn't change\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ # Check the shards when the dataset is a round multiple of batch size but not total batch size.\n+ batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)\n+ expected = [\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)\n+ expected = [\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ # Check the shards when the dataset is not a round multiple of batch size but has a multiple of\n+ # num_processes batch.\n+ batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)\n+ expected = [\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 
14], [18, 19, 20]],\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)\n+ expected = [\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of\n+ # num_processes batch.\n+ batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)\n+ expected = [\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)\n+ expected = [\n+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],\n+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ # Check the shards when the dataset is very small.\n+ batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)\n+ expected = [[[0, 1]], []]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)\n+ expected = [[], []]\n+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)\n+\n+ def test_batch_sampler_shards_with_splits_no_even(self):\n+ # Check the shards when the dataset is a round multiple of batch size.\n+ batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)\n+ expected = [\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)\n+ # Expected shouldn't change\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\n+\n+ # Check the shards when the dataset is not a round multiple of batch size.\n+ batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)\n+ expected = [\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)\n+ expected = [\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\n+\n+ # Check the shards when the dataset is not a round multiple of batch size or num_processes.\n+ batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)\n+ expected = [\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)\n+ expected = [\n+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],\n+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],\n+ ]\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\n+\n+ # Check the 
shards when the dataset is very small.\n+ batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)\n+ expected = [[[0, 1]], []]\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\n+\n+ batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)\n+ expected = [[], []]\n+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)\n+\n+ def test_batch_sampler_with_varying_batch_size(self):\n+ batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]\n+ batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]\n+\n+ self.assertEqual(len(batch_sampler_shards[0]), 3)\n+ self.assertEqual(len(batch_sampler_shards[1]), 2)\n+\n+ self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])\n+ self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])\n+\n def check_iterable_dataset_shards(\n self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False\n ):\n", "code_comments": [ { "body": "```suggestion\r\n Whether or not to loop back at the beginning of the sampler when the number of samples is not a round\r\n multiple of (original batch size / number of processes).\r\n```", "diff_hunk": "@@ -112,6 +112,9 @@ class BatchSamplerShard(BatchSampler):\n this argument is set to `False`.\n - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`\n then `[6, 7]` if this argument is set to `True`.\n+ even_batches (`bool`, *optional*, defaults to `True`):\n+ Whether or not to loop back at the beginning of the sampler when the number of samples is not a round\n+ multiple of the batch size/number of processes.", "from_author": false }, { "body": "`self.batch_size` can't be `None` else this logic would result in errors. ", "diff_hunk": "@@ -182,35 +201,41 @@ def _iter_with_no_split(self):\n # yielding it.\n if idx % self.num_processes == self.process_index:\n batch_to_yield = batch\n- if idx % self.num_processes == self.num_processes - 1 and len(batch) == self.batch_size:\n+ if idx % self.num_processes == self.num_processes - 1 and (\n+ self.batch_size is None or len(batch) == self.batch_size\n+ ):\n yield batch_to_yield\n batch_to_yield = []\n \n # If drop_last is True, iteration is over, otherwise...\n if not self.drop_last and len(initial_data) > 0:\n- # ... we yield the complete batch we had saved before if it has the proper length\n- if len(batch_to_yield) == self.batch_size:\n- yield batch_to_yield\n-\n- # For degenerate cases where the dataset has less than num_process * batch_size samples\n- while len(initial_data) < self.num_processes * self.batch_size:\n- initial_data += initial_data\n-\n- # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\n- if len(batch) == self.batch_size:\n- batch = []\n- idx += 1\n-\n- # Make sure we yield a multiple of self.num_processes batches\n- cycle_index = 0\n- while idx % self.num_processes != 0 or len(batch) > 0:\n- end_index = cycle_index + self.batch_size - len(batch)\n- batch += initial_data[cycle_index:end_index]\n- if idx % self.num_processes == self.process_index:\n- yield batch\n- cycle_index = end_index\n- batch = []\n- idx += 1\n+ if not self.even_batches:\n+ if len(batch_to_yield) > 0:\n+ yield batch_to_yield\n+ else:\n+ # ... 
we yield the complete batch we had saved before if it has the proper length\n+ if len(batch_to_yield) == self.batch_size:\n+ yield batch_to_yield\n+\n+ # For degenerate cases where the dataset has less than num_process * batch_size samples\n+ while len(initial_data) < self.num_processes * self.batch_size:\n+ initial_data += initial_data\n+\n+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\n+ if len(batch) == self.batch_size:\n+ batch = []\n+ idx += 1\n+\n+ # Make sure we yield a multiple of self.num_processes batches\n+ cycle_index = 0\n+ while idx % self.num_processes != 0 or len(batch) > 0:\n+ end_index = cycle_index + self.batch_size - len(batch)\n+ batch += initial_data[cycle_index:end_index]\n+ if idx % self.num_processes == self.process_index:\n+ yield batch\n+ cycle_index = end_index\n+ batch = []\n+ idx += 1", "from_author": false }, { "body": "Yes it can't be `None` if `even_batches` is not `False`, will add an early check for that.", "diff_hunk": "@@ -182,35 +201,41 @@ def _iter_with_no_split(self):\n # yielding it.\n if idx % self.num_processes == self.process_index:\n batch_to_yield = batch\n- if idx % self.num_processes == self.num_processes - 1 and len(batch) == self.batch_size:\n+ if idx % self.num_processes == self.num_processes - 1 and (\n+ self.batch_size is None or len(batch) == self.batch_size\n+ ):\n yield batch_to_yield\n batch_to_yield = []\n \n # If drop_last is True, iteration is over, otherwise...\n if not self.drop_last and len(initial_data) > 0:\n- # ... we yield the complete batch we had saved before if it has the proper length\n- if len(batch_to_yield) == self.batch_size:\n- yield batch_to_yield\n-\n- # For degenerate cases where the dataset has less than num_process * batch_size samples\n- while len(initial_data) < self.num_processes * self.batch_size:\n- initial_data += initial_data\n-\n- # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\n- if len(batch) == self.batch_size:\n- batch = []\n- idx += 1\n-\n- # Make sure we yield a multiple of self.num_processes batches\n- cycle_index = 0\n- while idx % self.num_processes != 0 or len(batch) > 0:\n- end_index = cycle_index + self.batch_size - len(batch)\n- batch += initial_data[cycle_index:end_index]\n- if idx % self.num_processes == self.process_index:\n- yield batch\n- cycle_index = end_index\n- batch = []\n- idx += 1\n+ if not self.even_batches:\n+ if len(batch_to_yield) > 0:\n+ yield batch_to_yield\n+ else:\n+ # ... 
we yield the complete batch we had saved before if it has the proper length\n+ if len(batch_to_yield) == self.batch_size:\n+ yield batch_to_yield\n+\n+ # For degenerate cases where the dataset has less than num_process * batch_size samples\n+ while len(initial_data) < self.num_processes * self.batch_size:\n+ initial_data += initial_data\n+\n+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\n+ if len(batch) == self.batch_size:\n+ batch = []\n+ idx += 1\n+\n+ # Make sure we yield a multiple of self.num_processes batches\n+ cycle_index = 0\n+ while idx % self.num_processes != 0 or len(batch) > 0:\n+ end_index = cycle_index + self.batch_size - len(batch)\n+ batch += initial_data[cycle_index:end_index]\n+ if idx % self.num_processes == self.process_index:\n+ yield batch\n+ cycle_index = end_index\n+ batch = []\n+ idx += 1", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/776", "pr_id": 1091301397 }, { "diff": "diff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex 5e26eb9b5..d6ff4908a 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -17,6 +17,7 @@\n import logging\n import os\n import re\n+import subprocess\n import tempfile\n import unittest\n import zipfile\n@@ -85,59 +86,61 @@ def setUp(self):\n self.add_mocks(mock.patch.dict(os.environ, {\"WANDB_DIR\": self.tmpdir}))\n \n @staticmethod\n- def get_value_from_log(key: str, log: str, key_occurrence: int = 0):\n+ def parse_log(log: str, section: str, record: bool = True):\n \"\"\"\n- Parses wandb log for `key` and returns the value.\n- If parsing through multiple calls to .log, pass in a `key_occurrence`\n+ Parses wandb log for `section` and returns a dictionary of\n+ all items in that section. 
Section names are based on the\n+ output of `wandb sync --view --verbose` and items starting\n+ with \"Record\" in that result\n \"\"\"\n- res = re.findall(rf\"(?<={key} )[^\\s]+\", log)[key_occurrence]\n- if '\"' in res:\n- return re.findall(r'\"([^\"]*)\"', res)[0]\n+ # Big thanks to the W&B team for helping us parse their logs\n+ pattern = rf\"{section} ([\\S\\s]*?)\\n\\n\"\n+ if record:\n+ pattern = rf\"Record: {pattern}\"\n+ cleaned_record = re.findall(pattern, log)[0]\n+ # A config\n+ if section == \"config\" or section == \"history\":\n+ cleaned_record = re.findall(r'\"([a-zA-Z0-9_.,]+)', cleaned_record)\n+ return {key: val for key, val in zip(cleaned_record[0::2], cleaned_record[1::2])}\n+ # Everything else\n else:\n- return res\n+ return dict(re.findall(r'(\\w+): \"([^\\s]+)\"', cleaned_record))\n \n- def test_init_trackers(self):\n+ def test_wandb(self):\n project_name = \"test_project_with_config\"\n accelerator = Accelerator(log_with=\"wandb\")\n config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n kwargs = {\"wandb\": {\"tags\": [\"my_tag\"]}}\n accelerator.init_trackers(project_name, config, kwargs)\n- accelerator.end_training()\n- # The latest offline log is stored at wandb/latest-run/*.wandb\n- for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\n- logger.info(child)\n- if child.is_file() and child.suffix == \".wandb\":\n- with open(child, \"rb\") as f:\n- content = f.read()\n- break\n-\n- # Check HPS through careful parsing and cleaning\n- cleaned_log = re.sub(r\"[\\x00-\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\n- self.assertEqual(self.get_value_from_log(\"num_iterations\", cleaned_log), \"12\")\n- self.assertEqual(self.get_value_from_log(\"learning_rate\", cleaned_log), \"0.01\")\n- self.assertEqual(self.get_value_from_log(\"some_boolean\", cleaned_log), \"false\")\n- self.assertEqual(self.get_value_from_log(\"some_string\", cleaned_log), \"some_value\")\n- self.assertIn(\"my_tag\", cleaned_log)\n-\n- def test_log(self):\n- project_name = \"test_project_with_log\"\n- accelerator = Accelerator(log_with=\"wandb\")\n- accelerator.init_trackers(project_name)\n values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n accelerator.log(values, step=0)\n accelerator.end_training()\n # The latest offline log is stored at wandb/latest-run/*.wandb\n for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\n if child.is_file() and child.suffix == \".wandb\":\n- with open(child, \"rb\") as f:\n- content = f.read()\n+ content = subprocess.check_output(\n+ [\"wandb\", \"sync\", \"--view\", \"--verbose\", str(child)], env=os.environ.copy()\n+ ).decode(\"utf8\", \"ignore\")\n break\n+\n # Check HPS through careful parsing and cleaning\n- cleaned_log = re.sub(r\"[\\x00-\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\n- self.assertTrue(\"0.1\" in self.get_value_from_log(\"total_loss\", cleaned_log))\n- self.assertTrue(\"1\" in self.get_value_from_log(\"iteration\", cleaned_log))\n- self.assertTrue(\"some_value\" in self.get_value_from_log(\"my_text\", cleaned_log))\n- self.assertTrue(\"0\" in self.get_value_from_log(\"_step\", cleaned_log))\n+ logged_items = self.parse_log(content, \"config\")\n+ self.assertEqual(logged_items[\"num_iterations\"], \"12\")\n+ self.assertEqual(logged_items[\"learning_rate\"], \"0.01\")\n+ self.assertEqual(logged_items[\"some_boolean\"], \"false\")\n+ self.assertEqual(logged_items[\"some_string\"], \"some_value\")\n+ 
self.assertEqual(logged_items[\"some_string\"], \"some_value\")\n+\n+ # Run tags\n+ logged_items = self.parse_log(content, \"run\", False)\n+ self.assertEqual(logged_items[\"tags\"], \"my_tag\")\n+\n+ # Actual logging\n+ logged_items = self.parse_log(content, \"history\")\n+ self.assertEqual(logged_items[\"total_loss\"], \"0.1\")\n+ self.assertEqual(logged_items[\"iteration\"], \"1\")\n+ self.assertEqual(logged_items[\"my_text\"], \"some_value\")\n+ self.assertEqual(logged_items[\"_step\"], \"0\")\n \n \n # Comet has a special `OfflineExperiment` we need to use for testing\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/775", "pr_id": 1091296444 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 08ee3a65a..1760e72bc 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -365,7 +365,7 @@ def get_balanced_memory(\n if not torch.cuda.is_available():\n return max_memory\n \n- num_devices = len([d for d in max_memory if torch.device(d).type == \"cuda\"])\n+ num_devices = len([d for d in max_memory if torch.device(d).type == \"cuda\" and max_memory[d] > 0])\n module_sizes = compute_module_sizes(model, dtype=dtype)\n per_gpu = module_sizes[\"\"] // (num_devices - 1 if low_zero else num_devices)\n \n@@ -409,7 +409,7 @@ def get_balanced_memory(\n \n max_memory = get_max_memory(max_memory)\n # The last device is left with max_memory just in case the buffer is not enough.\n- for i in range(num_devices - 1):\n+ for i in range(len(max_memory) - 1):\n max_memory[i] = min(0 if low_zero and i == 0 else per_gpu, max_memory[i])\n \n if low_zero:\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\nindex 1c6f6088d..02ab867d7 100644\n--- a/tests/test_modeling_utils.py\n+++ b/tests/test_modeling_utils.py\n@@ -375,3 +375,7 @@ def test_get_balanced_memory(self):\n # Last device always get max memory to give more buffer and avoid accidental CPU offload\n max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500})\n self.assertDictEqual({0: 215, 1: 500}, max_memory)\n+\n+ # If we set a device to 0, it's not counted.\n+ max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300})\n+ self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/774", "pr_id": 1090989496 }, { "diff": "diff --git a/.github/workflows/build-docker-images-release.yml b/.github/workflows/build-docker-images-release.yml\nindex 654259f87..be0c76c48 100644\n--- a/.github/workflows/build-docker-images-release.yml\n+++ b/.github/workflows/build-docker-images-release.yml\n@@ -17,7 +17,7 @@ jobs:\n steps:\n - uses: actions/checkout@v3\n - id: step1\n- run: echo \"::set-output name=version::$(python setup.py --version)\"\n+ run: echo \"version=$(python setup.py --version)\" >> $GITHUB_OUTPUT\n \n version-cpu:\n name: \"Latest Accelerate CPU [version]\"\ndiff --git a/.github/workflows/build_and_run_tests.yml b/.github/workflows/build_and_run_tests.yml\nindex abf9f3883..68e11c44a 100644\n--- a/.github/workflows/build_and_run_tests.yml\n+++ b/.github/workflows/build_and_run_tests.yml\n@@ -16,7 +16,7 @@ jobs:\n outputs:\n changed: ${{ 
steps.was_changed.outputs.changed }}\n steps:\n- - uses: actions/checkout@v3\n+ - uses: actions/checkout@v3.1.0\n with: \n fetch-depth: \"2\"\n \n@@ -29,7 +29,7 @@ jobs:\n run: |\n for file in ${{ steps.changed-files.outputs.all_changed_files }}; do\n if [ `basename \"${file}\"` == \"setup.py\" ]; then\n- echo ::set-output name=changed::\"1\"\n+ echo \"changed=1\" >> $GITHUB_OUTPUT\n fi\n done\n \ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 2959316d8..691397b0d 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -38,7 +38,7 @@ jobs:\n test_rest\n ]\n steps:\n- - uses: actions/checkout@v3\n+ - uses: actions/checkout@v3.1.0\n - name: Set up python 3.7\n uses: actions/setup-python@v3\n with:\n@@ -50,7 +50,7 @@ jobs:\n path: |\n ${{ env.pythonLocation }}\n ${{ env.HF_HOME }}\n- key: ${{ env.pythonLocation }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}\n+ key: ${{ env.pythonLocation }}-${{ matrix.pytorch-version }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}\n \n - name: Install the library\n run: |\n@@ -62,6 +62,8 @@ jobs:\n pip install pytest-reportlog\n \n - name: Run Tests\n+ env: \n+ PYTORCH_VERSION: ${{ matrix.pytorch-version }}\n run: |\n make ${{ matrix.test-kind }}\n \ndiff --git a/Makefile b/Makefile\nindex 558e98347..9aaecb076 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -25,43 +25,43 @@ style:\n \t\n # Run tests for the library\n test:\n-\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log 'all.log',)\n+\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_all.log\",)\n \n test_big_modeling:\n-\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log 'big_modeling.log',)\n+\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_big_modeling.log\",)\n \n test_core:\n \tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\n-\t--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log 'core.log',)\n+\t--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_core.log\",)\n \n test_cli:\n-\tpython -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log 'cli.log',)\n+\tpython -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_cli.log\",)\n \n test_deepspeed:\n-\tpython -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log 'deepspeed.log',)\n+\tpython -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_deepspeed.log\",)\n \n test_fsdp:\n-\tpython -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'fsdp.log',)\n+\tpython -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_fsdp.log\",)\n \n test_examples:\n-\tpython -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log 'examples.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_examples.log\",)\n \n # Broken down example tests for the CI runners\n test_integrations:\n-\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'integrations.log',)\n+\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 
\"$(PYTORCH_VERSION)_integrations.log\",)\n \n test_example_differences:\n-\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log 'example_diff.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_example_diff.log\",)\n \n test_checkpoint_epoch:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(IS_GITHUB_CI),--report-log 'checkpoint_epoch.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_checkpoint_epoch.log\",)\n \n test_checkpoint_step:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(IS_GITHUB_CI),--report-log 'checkpoint_step.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_checkpoint_step.log\",)\n \n # Same as test but used to install only the base dependencies\n test_prod:\n \t$(MAKE) test_core\n \n test_rest:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(IS_GITHUB_CI),--report-log 'rest.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(IS_GITHUB_CI),--report-log \"$(PYTORCH_VERSION)_rest.log\",)\ndiff --git a/utils/log_reports.py b/utils/log_reports.py\nindex f701f08c2..7f790dad3 100644\n--- a/utils/log_reports.py\n+++ b/utils/log_reports.py\n@@ -1,6 +1,5 @@\n import json\n-from pathlib import Path \n-import subprocess\n+from pathlib import Path\n \n failed = []\n passed = []\n@@ -19,16 +18,16 @@\n duration = f'{line[\"duration\"]:.4f}'\n if line.get(\"outcome\", \"\") == \"failed\":\n section_num_failed += 1\n- failed.append([test, duration])\n+ failed.append([test, duration, log.name.split('_')[0]])\n else:\n- passed.append([test, duration])\n+ passed.append([test, duration, log.name.split('_')[0]])\n group_info.append([str(log), section_num_failed])\n \n if len(failed) > 0:\n result = \"## Failed Tests:\\n\"\n- failed_table = '| Test Location | Test Class | Test Name |\\n|---|---|---|\\n| '\n+ failed_table = '| Test Location | Test Class | Test Name | PyTorch Version |\\n|---|---|---|---|\\n| '\n for test in failed:\n failed_table += ' | '.join(test[0].split(\"::\"))\n- failed_table += \" |\"\n+ failed_table += f\" | {test[2]} |\"\n result += failed_table\n print(result)\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thank you @muellerzr for updating my knowledge!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/773", "pr_id": 1090961915 }, { "diff": "diff --git a/.github/workflows/run_merge_tests.yml b/.github/workflows/run_merge_tests.yml\nindex a794cd72b..fcbb62f53 100644\n--- a/.github/workflows/run_merge_tests.yml\n+++ b/.github/workflows/run_merge_tests.yml\n@@ -29,6 +29,11 @@ jobs:\n pip install -e .[testing,test_trackers]\n pip install pytest-reportlog\n \n+ - name: Run CLI tests\n+ run: |\n+ source activate accelerate\n+ make test_cli\n+ \n - name: Run test on GPUs\n run: |\n source activate accelerate\n@@ -62,6 +67,11 @@ jobs:\n pip install -e .[testing,test_trackers]\n pip install pytest-reportlog\n \n+ - name: 
Run CLI tests\n+ run: |\n+ source activate accelerate\n+ make test_cli\n+\n - name: Run test on GPUs\n run: |\n source activate accelerate\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 593eec741..2959316d8 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -28,6 +28,7 @@ jobs:\n test-kind: [\n test_prod,\n test_core,\n+ test_cli,\n test_big_modeling,\n test_deepspeed,\n test_fsdp,\ndiff --git a/Makefile b/Makefile\nindex 52c594ae7..558e98347 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -32,7 +32,10 @@ test_big_modeling:\n \n test_core:\n \tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\n-\t--ignore=./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'core.log',)\n+\t--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log 'core.log',)\n+\n+test_cli:\n+\tpython -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log 'cli.log',)\n \n test_deepspeed:\n \tpython -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log 'deepspeed.log',)\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 65e9be9c7..fa7588b0a 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -432,7 +432,7 @@ def simple_launcher(args):\n current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\n if args.use_mps_device:\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n- elif args.gpu_ids != \"all\":\n+ elif args.gpu_ids != \"all\" and args.gpu_ids is not None:\n current_env[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\n if args.num_machines > 1:\n current_env[\"MASTER_ADDR\"] = args.main_process_ip\n@@ -489,7 +489,7 @@ def multi_gpu_launcher(args):\n \n current_env = os.environ.copy()\n gpu_ids = getattr(args, \"gpu_ids\")\n- if gpu_ids != \"all\":\n+ if gpu_ids != \"all\" and args.gpu_ids is not None:\n current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\n mixed_precision = args.mixed_precision.lower()\n try:\n@@ -637,7 +637,7 @@ def deepspeed_launcher(args):\n \n current_env = os.environ.copy()\n gpu_ids = getattr(args, \"gpu_ids\")\n- if gpu_ids != \"all\":\n+ if gpu_ids != \"all\" and args.gpu_ids is not None:\n current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\n try:\n mixed_precision = PrecisionType(args.mixed_precision.lower())\n@@ -947,7 +947,7 @@ def launch_command(args):\n else:\n if args.num_processes is None:\n args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1\n- warned.append(\"\\t`--num_processes` was set to a value of `{args.num_processes}`\")\n+ warned.append(f\"\\t`--num_processes` was set to a value of `{args.num_processes}`\")\n if args.num_machines is None:\n warned.append(\"\\t`--num_machines` was set to a value of `1`\")\n args.num_machines = 1\ndiff --git a/src/accelerate/test_utils/scripts/test_cli.py b/src/accelerate/test_utils/scripts/test_cli.py\nnew file mode 100644\nindex 000000000..491410e5f\n--- /dev/null\n+++ b/src/accelerate/test_utils/scripts/test_cli.py\n@@ -0,0 +1,13 @@\n+import torch\n+\n+\n+def main():\n+ if torch.cuda.is_available():\n+ num_gpus = torch.cuda.device_count()\n+ else:\n+ num_gpus = 0\n+ print(f\"Successfully ran on {num_gpus} GPUs\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/tests/test_cli.py b/tests/test_cli.py\nnew file mode 100644\nindex 000000000..ceed58290\n--- /dev/null\n+++ b/tests/test_cli.py\n@@ -0,0 +1,65 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import inspect\n+import os\n+import unittest\n+from pathlib import Path\n+\n+import torch\n+\n+import accelerate\n+from accelerate.test_utils import execute_subprocess_async\n+\n+\n+class AccelerateLauncherTester(unittest.TestCase):\n+ \"\"\"\n+ Test case for verifying the `accelerate launch` CLI operates correctly.\n+ If a `default_config.yaml` file is located in the cache it will temporarily move it\n+ for the duration of the tests.\n+ \"\"\"\n+\n+ mod_file = inspect.getfile(accelerate.test_utils)\n+ test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_cli.py\"])\n+\n+ base_cmd = [\"accelerate\", \"launch\"]\n+ config_folder = Path.home() / \".cache/huggingface/accelerate\"\n+ config_file = \"default_config.yaml\"\n+ config_path = config_folder / config_file\n+ changed_path = config_folder / \"_default_config.yaml\"\n+\n+ test_config_path = Path(\"tests/test_configs\")\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ if cls.config_path.is_file():\n+ cls.config_path.rename(cls.changed_path)\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ if cls.changed_path.is_file():\n+ cls.changed_path.rename(cls.config_path)\n+\n+ def test_no_config(self):\n+ cmd = self.base_cmd\n+ if torch.cuda.is_available() and (torch.cuda.device_count() > 1):\n+ cmd += [\"--multi_gpu\"]\n+ execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())\n+\n+ def test_config_compatibility(self):\n+ for config in sorted(self.test_config_path.glob(\"**/*.yaml\")):\n+ with self.subTest(config_file=config):\n+ execute_subprocess_async(\n+ self.base_cmd + [\"--config_file\", str(config), self.test_file_path], env=os.environ.copy()\n+ )\ndiff --git a/tests/test_configs/0_11_0.yaml b/tests/test_configs/0_11_0.yaml\nnew file mode 100644\nindex 000000000..9ef829e6b\n--- /dev/null\n+++ b/tests/test_configs/0_11_0.yaml\n@@ -0,0 +1,12 @@\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: 'NO'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n\\ No newline at end of file\ndiff --git a/tests/test_configs/0_12_0.yaml b/tests/test_configs/0_12_0.yaml\nnew file mode 100644\nindex 000000000..00d06aab9\n--- /dev/null\n+++ b/tests/test_configs/0_12_0.yaml\n@@ -0,0 +1,13 @@\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: 'NO'\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n\\ No newline at end of file\ndiff --git a/tests/test_configs/README.md b/tests/test_configs/README.md\nnew file mode 100644\nindex 000000000..fd88d066c\n--- /dev/null\n+++ b/tests/test_configs/README.md\n@@ -0,0 +1,2 @@\n+This folder contains test configs for 
`accelerate config`. These should be generated for each major version\n+and are written based on `accelerate config` and selecting the \"No distributed training\" option.\n\\ No newline at end of file\ndiff --git a/tests/test_configs/latest.yaml b/tests/test_configs/latest.yaml\nnew file mode 100644\nindex 000000000..87b294cf8\n--- /dev/null\n+++ b/tests/test_configs/latest.yaml\n@@ -0,0 +1,17 @@\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: 'NO'\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+gpu_ids: all\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config: {}\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n", "code_comments": [ { "body": "This addition is needed for the test case of `CUDA_VISIBLE_DEVICES=\"\"`", "diff_hunk": "@@ -489,7 +489,7 @@ def multi_gpu_launcher(args):\n \n current_env = os.environ.copy()\n gpu_ids = getattr(args, \"gpu_ids\")\n- if gpu_ids != \"all\":\n+ if gpu_ids != \"all\" and args.gpu_ids is not None:", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/772", "pr_id": 1090925271 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 9d57c144f..83c6738af 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -143,6 +143,8 @@ def cpu_offload(\n execution_device = next(iter(model.parameters())).device\n if state_dict is None:\n state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)\n attach_align_device_hook(\n model,\n execution_device=execution_device,\n@@ -151,7 +153,7 @@ def cpu_offload(\n weights_map=state_dict,\n preload_module_classes=preload_module_classes,\n )\n- add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+\n return model\n \n \n@@ -189,6 +191,8 @@ def disk_offload(\n if execution_device is None:\n execution_device = next(iter(model.parameters())).device\n weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)\n attach_align_device_hook(\n model,\n execution_device=execution_device,\n@@ -197,7 +201,7 @@ def disk_offload(\n weights_map=weights_map,\n preload_module_classes=preload_module_classes,\n )\n- add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+\n return model\n \n \ndiff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\nindex 493444daa..120bb7f42 100644\n--- a/src/accelerate/hooks.py\n+++ b/src/accelerate/hooks.py\n@@ -108,26 +108,34 @@ def detach_hook(self, module):\n return module\n \n \n-def add_hook_to_module(module: nn.Module, hook: ModelHook):\n+def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):\n \"\"\"\n Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove\n this behavior and restore the original `forward` method, use `remove_hook_from_module`.\n \n <Tip warning={true}>\n \n- If the module already contains a hook, this will replace it with the new hook passed. 
To chain two hooks together,\n- use the `SequentialHook` class.\n+ If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks\n+ together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.\n \n </Tip>\n \n Args:\n module (`torch.nn.Module`): The module to attach a hook to.\n hook (`ModelHook`): The hook to attach.\n+ append (`bool`, *optional*, defaults to `False`):\n+ Whether the hook should be chained with an existing one (if module already contains a hook) or not.\n \n Returns:\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n be discarded).\n \"\"\"\n+\n+ if append and (getattr(module, \"_hf_hook\", None) is not None):\n+ old_hook = module._hf_hook\n+ remove_hook_from_module(module)\n+ hook = SequentialHook(old_hook, hook)\n+\n if hasattr(module, \"_hf_hook\") and hasattr(module, \"_old_forward\"):\n # If we already put some hook on this module, we replace it with the new one.\n old_forward = module._old_forward\n@@ -352,7 +360,7 @@ def attach_align_device_hook(\n offload_buffers=offload_buffers,\n place_submodules=full_offload,\n )\n- add_hook_to_module(module, hook)\n+ add_hook_to_module(module, hook, append=True)\n \n # We stop the recursion in case we hit the full offload.\n if full_offload:\ndiff --git a/tests/test_hooks.py b/tests/test_hooks.py\nindex 9d48db9e1..e3824809c 100644\n--- a/tests/test_hooks.py\n+++ b/tests/test_hooks.py\n@@ -69,6 +69,25 @@ def test_add_and_remove_hooks(self):\n self.assertFalse(hasattr(test_model, \"_hf_hook\"))\n self.assertFalse(hasattr(test_model, \"_old_forward\"))\n \n+ def test_append_and_remove_hooks(self):\n+ test_model = ModelForTest()\n+ test_hook = ModelHook()\n+\n+ add_hook_to_module(test_model, test_hook)\n+ add_hook_to_module(test_model, test_hook, append=True)\n+\n+ self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)\n+ self.assertEqual(len(test_model._hf_hook.hooks), 2)\n+ self.assertTrue(hasattr(test_model, \"_old_forward\"))\n+\n+ # Check adding the hook did not change the name or the signature\n+ self.assertEqual(test_model.forward.__name__, \"forward\")\n+ self.assertListEqual(list(inspect.signature(test_model.forward).parameters), [\"x\"])\n+\n+ remove_hook_from_module(test_model)\n+ self.assertFalse(hasattr(test_model, \"_hf_hook\"))\n+ self.assertFalse(hasattr(test_model, \"_old_forward\"))\n+\n def test_pre_forward_hook_is_executed(self):\n test_model = ModelForTest()\n x = torch.randn(2, 3)\n", "code_comments": [ { "body": "```suggestion\r\n append (`bool`, *optional*, defaults to `False`):\r\n Whether the hook should be chained with an existing one (if module already contains a hook) or not.\r\n```\r\n\r\nAlso can't comment on line 119 but this should also be mentioned in the Note.", "diff_hunk": "@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\n Args:\n module (`torch.nn.Module`): The module to attach a hook to.\n hook (`ModelHook`): The hook to attach.\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook", "from_author": false }, { "body": "Those two ifs can be grouped.\r\nAlso the second test can be simplified in `getattr(module, \"_hf_hook\", None) is not None`", "diff_hunk": "@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\n Args:\n module (`torch.nn.Module`): The module to attach a hook to.\n hook 
(`ModelHook`): The hook to attach.\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook\n \n Returns:\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n be discarded).\n \"\"\"\n+\n+ if append:\n+ if hasattr(module, \"_hf_hook\") and (module._hf_hook is not None):", "from_author": false }, { "body": "```suggestion\r\n hook = SequentialHook(old_hook, hook)\r\n```", "diff_hunk": "@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\n Args:\n module (`torch.nn.Module`): The module to attach a hook to.\n hook (`ModelHook`): The hook to attach.\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook\n \n Returns:\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n be discarded).\n \"\"\"\n+\n+ if append:\n+ if hasattr(module, \"_hf_hook\") and (module._hf_hook is not None):\n+ old_hook = module._hf_hook\n+ remove_hook_from_module(module)\n+\n+ hooks = [old_hook, hook]\n+ hook = SequentialHook(*hooks)", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\n Args:\n module (`torch.nn.Module`): The module to attach a hook to.\n hook (`ModelHook`): The hook to attach.\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook", "from_author": true }, { "body": "Done, thanks for the great idea. ", "diff_hunk": "@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\n Args:\n module (`torch.nn.Module`): The module to attach a hook to.\n hook (`ModelHook`): The hook to attach.\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook\n \n Returns:\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n be discarded).\n \"\"\"\n+\n+ if append:\n+ if hasattr(module, \"_hf_hook\") and (module._hf_hook is not None):", "from_author": true }, { "body": "Done. Looks much better now.", "diff_hunk": "@@ -123,11 +123,21 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook):\n Args:\n module (`torch.nn.Module`): The module to attach a hook to.\n hook (`ModelHook`): The hook to attach.\n+ append (`bool`): Whether, if module already contains a hook, should chain the new one with SequentialHook\n \n Returns:\n `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n be discarded).\n \"\"\"\n+\n+ if append:\n+ if hasattr(module, \"_hf_hook\") and (module._hf_hook is not None):\n+ old_hook = module._hf_hook\n+ remove_hook_from_module(module)\n+\n+ hooks = [old_hook, hook]\n+ hook = SequentialHook(*hooks)", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "The fix is not exactly right: by doing so, the hook that ensures the input and output of the model are on the same device is now erased. In your code sample in #767, since `x` is on the CPU, `net(x)` should also be on the CPU. This is not the case with your PR. 
The solution would be to write a util function that will:\r\n- just add the hook if none is present\r\n- extract the current hook if one is present and chain it with this hook using a `SequentialHook`.\r\n\r\nThis is slightly more advanced than the current PR, so let me know if you'd prefer for me to do it :-)", "from_author": false }, { "body": "@sgugger I would like to try if that's ok to you. \r\n\r\nWhat do you think of creating an `append_if_needed` flag on `add_hook_to_module` that does what you just said?", "from_author": true }, { "body": "That works for me, though the name of the argument could simply be `append` :-)\r\nThanks for diving into this!", "from_author": false }, { "body": "@sgugger `append` it is.", "from_author": true }, { "body": "@sgugger it is ready for review, I've also added the tests. ", "from_author": true }, { "body": "> Very nice, thanks! Left a couple of nits, and I think you should still put the hook with io first: just tested locally and we still have the same issue of `net(x)` being on the wrong device since it runs second and the input was already moved.\r\n\r\n@sgugger I've just addressed your nits and moved the io hook to the top. Thanks for the review! I've tested it locally on the snippet of the bug report and it brings the tensor back to CPU after the inference. Thanks!", "from_author": true }, { "body": "@sgugger, there is a test step that failed due to an http error when installing a lib. I've created and empty commit to try running it again. ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/768", "pr_id": 1090006484 }, { "diff": "diff --git a/docs/source/usage_guides/megatron_lm.mdx b/docs/source/usage_guides/megatron_lm.mdx\nindex bc0b90701..d26ecd4b5 100644\n--- a/docs/source/usage_guides/megatron_lm.mdx\n+++ b/docs/source/usage_guides/megatron_lm.mdx\n@@ -103,65 +103,9 @@ cd ..\n \n 4. Installing Megatron-LM\n \n- a. Cloning the Megatron-LM repo\n- ```\n- git clone https://github.com/NVIDIA/Megatron-LM.git\n- cd Megatron-LM\n- ```\n-\n- b. 
Create a file `setup.py`, paste the below code and put in the root folder\n- ```python\n- \"\"\"Setup for pip package.\"\"\"\n-\n- import os\n- import sys\n- import setuptools\n-\n- if sys.version_info < (3,):\n- raise Exception(\"Python 2 is not supported by Megatron.\")\n-\n- with open(\"README.md\", \"r\") as fh:\n- long_description = fh.read()\n-\n- setuptools.setup(\n- name=\"megatron-lm\",\n- version=\"3.0.0\",\n- description=\"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism.\",\n- long_description=long_description,\n- long_description_content_type=\"text/markdown\",\n- # The project's main homepage.\n- url=\"https://github.com/NVIDIA/Megatron-LM\",\n- author=\"NVIDIA INC\",\n- maintainer=\"NVIDIA INC\",\n- # The licence under which the project is released\n- license=\"See https://github.com/NVIDIA/Megatron-LM/blob/master/LICENSE\",\n- classifiers=[\n- \"Intended Audience :: Developers\",\n- \"Intended Audience :: Science/Research\",\n- \"Intended Audience :: Information Technology\",\n- # Indicate what your project relates to\n- \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n- \"Topic :: Software Development :: Libraries :: Python Modules\",\n- # Additional Setting\n- \"Environment :: Console\",\n- \"Natural Language :: English\",\n- \"Operating System :: OS Independent\",\n- ],\n- python_requires=\">=3.6\",\n- packages=setuptools.find_packages(),\n- install_requires=[\"nltk\", \"six\", \"regex\", \"torch>=1.12.0\", \"pybind11\"],\n- # Add in any packaged data.\n- include_package_data=True,\n- zip_safe=False,\n- # PyPI package information.\n- keywords=\"deep learning, Megatron, gpu, NLP, nvidia, pytorch, torch, language\",\n- )\n- ```\n-\n- c. installing via below command\n- ```\n- pip install \".\"\n- ```\n+```\n+pip install git+https://github.com/huggingface/Megatron-LM.git\n+```\n \n ## Accelerate Megatron-LM Plugin\n \n@@ -183,7 +127,7 @@ What is the number of micro-batches? [1]:2\n Do you want to enable selective activation recomputation? [YES/no]: \n Do you want to use distributed optimizer which shards optimizer state and gradients across data pralellel ranks? [YES/no]: \n What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: \n-How many GPU(s) should be used for distributed training? [1]:8\n+How many GPU(s) should be used for distributed training? [1]:4\n Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: bf16\n ```\n \n@@ -210,7 +154,7 @@ megatron_lm_config:\n megatron_lm_use_distributed_optimizer: true\n mixed_precision: bf16\n num_machines: 1\n-num_processes: 8\n+num_processes: 4\n rdzv_backend: static\n same_network: true\n use_cpu: false\n@@ -289,8 +233,8 @@ examples/by_feature/megatron_lm_gpt_pretraining.py \\\n --dataset_config_name wikitext-2-raw-v1 \\\n --block_size 1024 \\\n --learning_rate 5e-5 \\\n---per_device_train_batch_size 4 \\\n---per_device_eval_batch_size 4 \\\n+--per_device_train_batch_size 24 \\\n+--per_device_eval_batch_size 24 \\\n --num_train_epochs 5 \\\n --with_tracking \\\n --report_to \"wandb\" \\\ndiff --git a/examples/by_feature/megatron_lm_gpt_pretraining.py b/examples/by_feature/megatron_lm_gpt_pretraining.py\nindex 68a425e4d..3fcf10207 100644\n--- a/examples/by_feature/megatron_lm_gpt_pretraining.py\n+++ b/examples/by_feature/megatron_lm_gpt_pretraining.py\n@@ -672,8 +672,10 @@ def group_texts(examples):\n output_dir = os.path.join(args.output_dir, output_dir)\n accelerator.save_state(output_dir)\n \n- if args.with_tracking:\n- accelerator.end_training()\n+ # this is causing some issue with Megatron-LM when using `wandb` at the end of the main function.\n+ # Everything works fine inspite of commenting this out. (wandb finishes/closes the run without error)\n+ # if args.with_tracking:\n+ # accelerator.end_training()\n \n if args.output_dir is not None:\n accelerator.wait_for_everyone()\n", "code_comments": [ { "body": "Maybe add some comment here on why it's not working for now and plans to fix in the future?", "diff_hunk": "@@ -672,8 +672,8 @@ def group_texts(examples):\n output_dir = os.path.join(args.output_dir, output_dir)\n accelerator.save_state(output_dir)\n \n- if args.with_tracking:\n- accelerator.end_training()\n+ # if args.with_tracking:\n+ # accelerator.end_training()", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -672,8 +672,8 @@ def group_texts(examples):\n output_dir = os.path.join(args.output_dir, output_dir)\n accelerator.save_state(output_dir)\n \n- if args.with_tracking:\n- accelerator.end_training()\n+ # if args.with_tracking:\n+ # accelerator.end_training()", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Hello @sgugger, wrt maintaining fork, sure, I can do that. 
This would help in making sure the functionality works with accelerate and we can extend it for more features (main repo is barely active).", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/766", "pr_id": 1089393747 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex e1282c0bb..bf17f57ce 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -777,10 +777,11 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n if device_placement:\n model = model.to(self.device)\n if self.distributed_type == DistributedType.MULTI_GPU:\n- kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n- model = torch.nn.parallel.DistributedDataParallel(\n- model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n- )\n+ if any(p.requires_grad for p in model.parameters()):\n+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n+ model = torch.nn.parallel.DistributedDataParallel(\n+ model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n+ )\n elif self.distributed_type == DistributedType.FSDP:\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/761", "pr_id": 1087746985 }, { "diff": "diff --git a/README.md b/README.md\nindex deddf2986..7b912dd80 100644\n--- a/README.md\n+++ b/README.md\n@@ -243,6 +243,7 @@ pip install accelerate\n - FP16 with native AMP (apex on the roadmap)\n - DeepSpeed support (Experimental)\n - PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)\n+- Megatron-LM support (Experimental)\n \n ## Citing πŸ€— Accelerate\n \ndiff --git a/docs/source/usage_guides/megatron_lm.mdx b/docs/source/usage_guides/megatron_lm.mdx\nindex 188dddb32..bc0b90701 100644\n--- a/docs/source/usage_guides/megatron_lm.mdx\n+++ b/docs/source/usage_guides/megatron_lm.mdx\n@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\n \n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\n+Each tensor is split into multiple chunks with each shard residing on separate GPU. At each step, the same mini-batch of data is processed\n+independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). \n+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.\n+For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using\n+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and \n+this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).\n+\n \n b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. 
\n Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \n-Layers are distributed uniformly across PP stages.\n+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for\n+pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP,\n+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters\n+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and \n+this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).\n \n c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\n-\n-d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\n+It reduces activation memory required as it prevents the same copies to be on the tensor parallel ranks \n+post `all-reduce` by replacing then with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. \n+As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost. \n+To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g., \n+if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample. \n+This increases the batch size that can be supported for training. For more details, please refer to the research paper\n+[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). \n+\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footprint by sharding optimizer states and gradients across DP ranks\n+(versus the traditional method of replicating the optimizer state across data parallel ranks). \n+For example, when using Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory.\n+This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs.\n+For more details, please refer the research paper [ZeRO: Memory Optimizations Toward Training Trillion\n+Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of πŸ€— blog \n+[The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism).\n \n e. **Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing.\n It doesn't store activations occupying large memory while being fast to recompute thereby achieving great tradeoff between memory and recomputation.\n+For example, for GPT-3, this leads to 70% reduction in required memory for activations at the expense of\n+only 2.7% FLOPs overhead for recomputation of activations. For more details, please refer to the research paper \n+[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf).\n \n f. 
**Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer.\n PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition.\ndiff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\nindex cc388acac..444784b49 100644\n--- a/docs/source/usage_guides/training_zoo.mdx\n+++ b/docs/source/usage_guides/training_zoo.mdx\n@@ -36,6 +36,7 @@ These examples showcase specific features that the Accelerate framework offers\n - [Memory-aware batch size finder](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/memory.py)\n - [Metric Computation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/multi_process_metrics.py)\n - [Using Trackers](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py)\n+- [Using Megatron-LM](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/megatron_lm_gpt_pretraining.py)\n \n ### Full Examples \n \n@@ -55,6 +56,8 @@ These examples showcase every feature in Accelerate at once that was shown in \"F\n - [Named entity recognition fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py)\n - [Image classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py)\n - [Summarization fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py)\n+- [End-to-end examples on how to use AWS SageMaker integration of Accelerate](https://github.com/huggingface/notebooks/blob/main/sagemaker/22_accelerate_sagemaker_examples/README.md)\n+- [Megatron-LM examples for various NLp tasks](https://github.com/pacman100/accelerate-megatron-test) \n \n ## Integration Examples \n \n", "code_comments": [ { "body": "```suggestion\r\nEach tensor is split into multiple chunks with each shard residing on separate GPU. At each step, the same mini-batch of data is processed\r\n```", "diff_hunk": "@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\n \n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\n+Each tensor are split into multiple chunks with each shard residing on separate GPU. At each step, same minimbatch of data is processed", "from_author": false }, { "body": "```suggestion\r\nIt reduces activation memory required as it prevents the same copies to be on the tensor parallel ranks \r\n```", "diff_hunk": "@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\n \n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\n+a. 
**Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\n+Each tensor are split into multiple chunks with each shard residing on separate GPU. At each step, same minimbatch of data is processed\n+independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). \n+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.\n+For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using\n+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and \n+this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).\n+\n \n b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \n Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \n-Layers are distributed uniformly across PP stages.\n+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for\n+pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP,\n+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters\n+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and \n+this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).\n \n c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\n-\n-d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\n+It reduced activation memory required as it prevents same copies to be on the tensor parallel ranks ", "from_author": false }, { "body": "```suggestion\r\npost `all-reduce` by replacing then with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. \r\n```", "diff_hunk": "@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\n \n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\n+Each tensor are split into multiple chunks with each shard residing on separate GPU. At each step, same minimbatch of data is processed\n+independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). \n+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.\n+For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using\n+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and \n+this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).\n+\n \n b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. 
\n Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \n-Layers are distributed uniformly across PP stages.\n+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for\n+pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP,\n+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters\n+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and \n+this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).\n \n c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\n-\n-d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\n+It reduced activation memory required as it prevents same copies to be on the tensor parallel ranks \n+post `all-reduce` by replacing it with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. ", "from_author": false }, { "body": "```suggestion\r\nd. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footprint by sharding optimizer states and gradients across DP ranks\r\n```", "diff_hunk": "@@ -23,18 +23,45 @@ For detailed information and how things work behind the scene please refer the g\n Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\n of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\n \n-a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.\n+Each tensor are split into multiple chunks with each shard residing on separate GPU. At each step, same minimbatch of data is processed\n+independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). \n+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.\n+For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using\n+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and \n+this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).\n+\n \n b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \n Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \n-Layers are distributed uniformly across PP stages.\n+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for\n+pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP,\n+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters\n+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and \n+this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).\n \n c. 
**Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\n-\n-d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\n+It reduced activation memory required as it prevents same copies to be on the tensor parallel ranks \n+post `all-reduce` by replacing it with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. \n+As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost. \n+To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g., \n+if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample. \n+This increases the batch size that can be supported for training. For more details, please refer to the research paper\n+[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). \n+\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/759", "pr_id": 1087176410 }, { "diff": "diff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\nindex 934923bef..f31027ee1 100644\n--- a/src/accelerate/utils/constants.py\n+++ b/src/accelerate/utils/constants.py\n@@ -58,3 +58,5 @@\n \"master_addr\",\n \"master_port\",\n ]\n+\n+CUDA_DISTRIBUTED_TYPES = [\"DEEPSPEED\", \"MULTI_GPU\", \"FSDP\", \"MEGATRON_LM\"]\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex aef1fd589..7ba4482cc 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -24,6 +24,7 @@\n from torch.distributed import ReduceOp\n \n from ..state import AcceleratorState\n+from .constants import CUDA_DISTRIBUTED_TYPES\n from .dataclasses import DistributedType, TensorInformation\n from .imports import is_tpu_available\n from .versions import is_torch_version\n@@ -220,12 +221,7 @@ def gather(tensor):\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n- elif AcceleratorState().distributed_type in [\n- DistributedType.DEEPSPEED,\n- DistributedType.MULTI_GPU,\n- DistributedType.FSDP,\n- DistributedType.MEGATRON_LM,\n- ]:\n+ elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:\n return _gpu_gather(tensor)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _cpu_gather(tensor)\n@@ -258,12 +254,7 @@ def gather_object(object: Any):\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n raise NotImplementedError(\"gather objects in TPU is not supported\")\n- elif AcceleratorState().distributed_type in [\n- DistributedType.DEEPSPEED,\n- DistributedType.MULTI_GPU,\n- DistributedType.FSDP,\n- DistributedType.MEGATRON_LM,\n- ]:\n+ elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:\n return _gpu_gather_object(object)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _cpu_gather_object(object)\n@@ -302,12 +293,7 @@ def broadcast(tensor, from_process: int = 0):\n \"\"\"\n if 
AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_broadcast(tensor, src=from_process, name=\"accelerate.utils.broadcast\")\n- elif AcceleratorState().distributed_type in [\n- DistributedType.DEEPSPEED,\n- DistributedType.MULTI_GPU,\n- DistributedType.FSDP,\n- DistributedType.MEGATRON_LM,\n- ]:\n+ elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:\n return _gpu_broadcast(tensor, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _gpu_broadcast(tensor, src=from_process)\n@@ -331,12 +317,7 @@ def broadcast_object_list(object_list, from_process: int = 0):\n if AcceleratorState().distributed_type == DistributedType.TPU:\n for i, obj in enumerate(object_list):\n object_list[i] = xm.mesh_reduce(\"accelerate.utils.broadcast_object_list\", obj, lambda x: x[from_process])\n- elif AcceleratorState().distributed_type in [\n- DistributedType.DEEPSPEED,\n- DistributedType.MULTI_GPU,\n- DistributedType.FSDP,\n- DistributedType.MEGATRON_LM,\n- ]:\n+ elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:\n torch.distributed.broadcast_object_list(object_list, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n torch.distributed.broadcast_object_list(object_list, src=from_process)\n@@ -452,12 +433,7 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\n if state.distributed_type == DistributedType.TPU:\n xm.all_reduce(\"sum\", cloned_tensor)\n return cloned_tensor\n- elif state.distributed_type in [\n- DistributedType.DEEPSPEED,\n- DistributedType.MULTI_GPU,\n- DistributedType.FSDP,\n- DistributedType.MEGATRON_LM,\n- ]:\n+ elif state.distributed_type.value in CUDA_DISTRIBUTED_TYPES:\n torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)\n return cloned_tensor\n else:\ndiff --git a/src/accelerate/utils/random.py b/src/accelerate/utils/random.py\nindex 8dc149a02..01c4df2af 100644\n--- a/src/accelerate/utils/random.py\n+++ b/src/accelerate/utils/random.py\n@@ -19,6 +19,7 @@\n import torch\n \n from ..state import AcceleratorState\n+from .constants import CUDA_DISTRIBUTED_TYPES\n from .dataclasses import DistributedType, RNGType\n from .imports import is_tpu_available\n \n@@ -64,12 +65,7 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n- elif state.distributed_type in [\n- DistributedType.DEEPSPEED,\n- DistributedType.MULTI_GPU,\n- DistributedType.FSDP,\n- DistributedType.MEGATRON_LM,\n- ]:\n+ elif state.distributed_type in CUDA_DISTRIBUTED_TYPES:\n rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n rng_state = rng_state.cpu()\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/758", "pr_id": 1087066669 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex e40797b39..e1282c0bb 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -1194,6 +1194,9 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n \"\"\"\n Should be used in place of `torch.nn.utils.clip_grad_norm_`.\n \n+ Returns:\n+ `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).\n+\n Example:\n \n 
```python\n@@ -1217,13 +1220,13 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n parameters = [p for p in parameters]\n for model in self._models:\n if parameters == [p for p in model.parameters()]:\n- model.clip_grad_norm_(max_norm, norm_type)\n- return\n+ return model.clip_grad_norm_(max_norm, norm_type)\n elif self.distributed_type == DistributedType.DEEPSPEED:\n # `accelerator.backward(loss)` is doing that automatically. Therefore, it's implementation is not needed\n- return\n+ # We cannot return the gradient norm because DeepSpeed does it.\n+ return None\n self.unscale_gradients()\n- torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\n+ return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\n \n def clip_grad_value_(self, parameters, clip_value):\n \"\"\"\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/756", "pr_id": 1086805522 }, { "diff": "diff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex 58009d03b..bdb53988a 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -18,7 +18,9 @@\n \n import torch\n \n-from .versions import is_torch_version\n+from packaging.version import parse\n+\n+from .versions import compare_versions, is_torch_version\n \n \n # The package importlib_metadata is in a different place, depending on the Python version.\n@@ -88,7 +90,11 @@ def is_bf16_available(ignore_tpu=False):\n \n \n def is_megatron_lm_available():\n- return importlib.util.find_spec(\"megatron\") is not None\n+ package_exists = importlib.util.find_spec(\"megatron\") is not None\n+ if package_exists:\n+ megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\n+ return compare_versions(megatron_version, \">=\", \"2.2.0\")\n+ return False\n \n \n def is_transformers_available():\n", "code_comments": [ { "body": "Redundant `else` :)", "diff_hunk": "@@ -88,7 +90,12 @@ def is_bf16_available(ignore_tpu=False):\n \n \n def is_megatron_lm_available():\n- return importlib.util.find_spec(\"megatron\") is not None\n+ package_exists = importlib.util.find_spec(\"megatron\") is not None\n+ if package_exists:\n+ megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\n+ return compare_versions(megatron_version, \">=\", \"2.2.0\")\n+ else:", "from_author": false }, { "body": "Good point, thanks @tjruwase :)", "diff_hunk": "@@ -88,7 +90,12 @@ def is_bf16_available(ignore_tpu=False):\n \n \n def is_megatron_lm_available():\n- return importlib.util.find_spec(\"megatron\") is not None\n+ package_exists = importlib.util.find_spec(\"megatron\") is not None\n+ if package_exists:\n+ megatron_version = parse(importlib_metadata.version(\"megatron-lm\"))\n+ return compare_versions(megatron_version, \">=\", \"2.2.0\")\n+ else:", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thank you for the fix ✨. 
LGTM!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/755", "pr_id": 1086523048 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex f348bb0a6..3e5eff45e 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -878,15 +878,17 @@ def launch_command(args):\n args.use_cpu = args.cpu\n \n if args.num_cpu_threads_per_process is None:\n- local_size = get_int_from_env(\n- [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n- )\n- args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n- if args.num_cpu_threads_per_process == 0:\n- args.num_cpu_threads_per_process = 1\n- warned.append(\n- f\"\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance\"\n- )\n+ args.num_cpu_threads_per_process = 1\n+ if args.use_cpu and args.num_processes > 1:\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n+ if args.num_cpu_threads_per_process > 1:\n+ args.num_cpu_threads_per_process = threads_per_process\n+ warned.append(\n+ f\"\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs\"\n+ )\n \n if any(warned):\n message = \"The following values were not passed to `accelerate launch` and had defaults used instead:\\n\"\n", "code_comments": [ { "body": "This means a chosen value of 1 is erased. Let's leave the default at `None` and change it at 1 when not in distributed CPU, or the other default when in distributed CPU.", "diff_hunk": "@@ -877,15 +877,15 @@ def launch_command(args):\n if not hasattr(args, \"use_cpu\"):\n args.use_cpu = args.cpu\n \n- if args.num_cpu_threads_per_process is None:\n+ if args.num_cpu_threads_per_process == 1 and args.use_cpu:", "from_author": false }, { "body": "Adjusted :) ", "diff_hunk": "@@ -877,15 +877,15 @@ def launch_command(args):\n if not hasattr(args, \"use_cpu\"):\n args.use_cpu = args.cpu\n \n- if args.num_cpu_threads_per_process is None:\n+ if args.num_cpu_threads_per_process == 1 and args.use_cpu:", "from_author": true }, { "body": "```suggestion\r\n if args.num_cpu_threads_per_process is None:\r\n```\r\nLet's not rely on Python bool conversion magic please :-)", "diff_hunk": "@@ -877,16 +877,18 @@ def launch_command(args):\n if not hasattr(args, \"use_cpu\"):\n args.use_cpu = args.cpu\n \n- if args.num_cpu_threads_per_process is None:\n- local_size = get_int_from_env(\n- [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n- )\n- args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n- if args.num_cpu_threads_per_process == 0:\n- args.num_cpu_threads_per_process = 1\n- warned.append(\n- f\"\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance\"\n- )\n+ if not args.num_cpu_threads_per_process:", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/753", "pr_id": 1083848321 }, { "diff": "diff --git a/src/accelerate/commands/config/config_args.py 
b/src/accelerate/commands/config/config_args.py\nindex 43faf45ca..716d5f602 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -165,6 +165,7 @@ class SageMakerConfig(BaseConfig):\n profile: Optional[str] = None\n region: str = \"us-east-1\"\n num_machines: int = 1\n+ gpu_ids: str = \"all\"\n base_job_name: str = f\"accelerate-sagemaker-{num_machines}\"\n pytorch_version: str = SAGEMAKER_PYTORCH_VERSION\n transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/751", "pr_id": 1083718310 }, { "diff": "diff --git a/docs/source/index.mdx b/docs/source/index.mdx\nindex 1664fd7a2..a87bd6e32 100644\n--- a/docs/source/index.mdx\n+++ b/docs/source/index.mdx\n@@ -51,19 +51,19 @@ accelerate launch {my_script.py}\n \n <div class=\"mt-10\">\n <div class=\"w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5\">\n- <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/basic_tutorials/overview\"\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"./basic_tutorials/overview\"\n ><div class=\"w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">Tutorials</div>\n <p class=\"text-gray-700\">Learn the basics and become familiar with using πŸ€— Accelerate. Start here if you are using πŸ€— Accelerate for the first time!</p>\n </a>\n- <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/usage_guides/gradient_accumulation\"\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"./usage_guides/gradient_accumulation\"\n ><div class=\"w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">How-to guides</div>\n <p class=\"text-gray-700\">Practical guides to help you achieve a specific goal. 
Take a look at these guides to learn how to use πŸ€— Accelerate to solve real-world problems.</p>\n </a>\n- <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/concept_guides/gradient_synchronization\"\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"./concept_guides/gradient_synchronization\"\n ><div class=\"w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">Conceptual guides</div>\n <p class=\"text-gray-700\">High-level explanations for building a better understanding of important topics such as avoiding subtle nuances and pitfalls in distributed training and DeepSpeed.</p>\n </a>\n- <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/package_reference/accelerator\"\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"./package_reference/accelerator\"\n ><div class=\"w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">Reference</div>\n <p class=\"text-gray-700\">Technical descriptions of how πŸ€— Accelerate classes and methods work.</p>\n </a>\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/749", "pr_id": 1081649357 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 0b57ae6d8..08ee3a65a 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -258,7 +258,7 @@ def get_max_layer_size(\n modules_to_treat = modules.copy()\n while len(modules_to_treat) > 0:\n module_name, module = modules_to_treat.pop(0)\n- modules_children = list(module.named_children())\n+ modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else []\n if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n # No splitting this one so we compare to the max_size\n size = module_sizes[module_name]\n", "code_comments": [ { "body": "```suggestion\r\n modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else []\r\n```", "diff_hunk": "@@ -258,7 +258,7 @@ def get_max_layer_size(\n modules_to_treat = modules.copy()\n while len(modules_to_treat) > 0:\n module_name, module = modules_to_treat.pop(0)\n- modules_children = list(module.named_children())\n+ modules_children = list(module.named_children()) if not isinstance(module, torch.nn.Parameter) else []", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "> What's a reproducer? I'm not sure how you get to a parameter here since we only apply the `children` method.\r\n\r\nHaha that was fast. Here a repro:\r\n\r\n```py\r\nfrom diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\r\n\r\nStableDiffusionSafetyChecker._no_split_modules = [\"CLIPEncoderLayer\"]\r\npipe = StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker', device_map='auto')\r\n```\r\n\r\n(sorry, it's 1GB of download)", "from_author": true }, { "body": "Which branch should I check? 
On main I just get an error telling me device_map auto is not supported yet", "from_author": false }, { "body": "Ah sorry just updated it above, can you try:\r\n\r\n```python\r\nfrom diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\r\n\r\nStableDiffusionSafetyChecker._no_split_modules = [\"CLIPEncoderLayer\"]\r\npipe = StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker', device_map='auto')\r\n```", "from_author": true }, { "body": "Awesome! Can I merge after one :heavy_check_mark: here or wait for another reviewer? ", "from_author": true }, { "body": "Nope, you can go ahead and merge!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/747", "pr_id": 1080567797 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 01c419f4f..e86a0abff 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -35,6 +35,7 @@ def get_cluster_input():\n \n machine_rank = 0\n num_machines = 1\n+ num_processes = 1\n gpu_ids = None\n main_process_ip = None\n main_process_port = None\n@@ -294,12 +295,6 @@ def get_cluster_input():\n default=1,\n error_message=\"Please enter an integer.\",\n )\n-\n- if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu:\n- gpu_ids = _ask_field(\n- \"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:\",\n- default=\"all\",\n- )\n elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED]:\n num_processes = _ask_field(\n \"How many GPU(s) should be used for distributed training? [1]:\",\n@@ -310,6 +305,12 @@ def get_cluster_input():\n else:\n num_processes = 1\n \n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu:\n+ gpu_ids = _ask_field(\n+ \"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? 
[all]:\",\n+ default=\"all\",\n+ )\n+\n if distributed_type != DistributedType.TPU:\n if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\n mixed_precision = \"no\"\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/746", "pr_id": 1080453143 }, { "diff": "diff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\nindex b3fff25e3..cc388acac 100644\n--- a/docs/source/usage_guides/training_zoo.mdx\n+++ b/docs/source/usage_guides/training_zoo.mdx\n@@ -27,6 +27,7 @@ These examples showcase the base features of Accelerate and are a great starting\n \n These examples showcase specific features that the Accelerate framework offers\n \n+- [Automatic memory-aware gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/automatic_gradient_accumulation.py)\n - [Checkpointing states](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py)\n - [Cross validation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/cross_validation.py)\n - [DeepSpeed](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/deepspeed_with_config_support.py)\ndiff --git a/examples/by_feature/automatic_gradient_accumulation.py b/examples/by_feature/automatic_gradient_accumulation.py\nnew file mode 100644\nindex 000000000..d6e0cf028\n--- /dev/null\n+++ b/examples/by_feature/automatic_gradient_accumulation.py\n@@ -0,0 +1,232 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.optim import AdamW\n+from torch.utils.data import DataLoader\n+\n+# New Code #\n+import evaluate\n+from accelerate import Accelerator, DistributedType\n+from accelerate.utils import find_executable_batch_size\n+from datasets import load_dataset\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to combine both the gradient accumulation\n+# and automatic batch size finder utilities of Accelerate to perfrom\n+# automatic gradient accumulation\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# 
https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+# For testing only\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ from accelerate.test_utils.training import mocked_dataloaders\n+\n+ get_dataloaders = mocked_dataloaders # noqa: F811\n+\n+\n+def training_function(config, args):\n+ # For testing only\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ config[\"num_epochs\"] = 2\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ observed_batch_size = int(config[\"batch_size\"])\n+\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n+\n+ # New Code #\n+ # We use the `find_executable_batch_size` decorator, passing in the desired observed batch size\n+ # to train on. If a CUDA OOM error occurs, it will retry this loop cutting the batch size in\n+ # half each time. 
From this, we can calculate the number of gradient accumulation steps needed\n+ # and modify the Accelerator object as a result\n+ @find_executable_batch_size(starting_batch_size=int(observed_batch_size))\n+ def inner_training_loop(batch_size):\n+ # Since we need to modify the outside accelerator object, we need to bring it\n+ # to the local scope\n+ nonlocal accelerator\n+\n+ # We can calculate the number of gradient accumulation steps based on the current\n+ # batch size vs the starting batch size\n+ num_gradient_accumulation_steps = observed_batch_size // batch_size\n+\n+ # And then set it in the Accelerator directly:\n+ accelerator.gradient_accumulation_steps = num_gradient_accumulation_steps\n+\n+ # Next we need to free all of the stored model references in the Accelerator each time\n+ accelerator.free_memory()\n+\n+ # And set the seed so our results are reproducable each reset\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs),\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ # And perform gradient accumulation\n+ with accelerator.accumulate(model):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+\n+ # New Code #\n+ # And call it at the end with no arguments\n+ # Note: You could also refactor this outside of your training loop function\n+ inner_training_loop()\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple 
example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ args = parser.parse_args()\n+ # New Code #\n+ # We modify the starting batch size to be an observed batch size of 256, to guarantee an initial CUDA OOM\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 256}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex ac8c4de52..9197070c8 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -36,6 +36,7 @@\n \"gradient_accumulation.py\",\n \"multi_process_metrics.py\",\n \"memory.py\",\n+ \"automatic_gradient_accumulation.py\",\n \"fsdp_with_peak_mem_tracking.py\",\n \"deepspeed_with_config_support.py\",\n ]\n", "code_comments": [ { "body": "```suggestion\r\n # half each time. From this, we can calculate the number of gradient accumulation steps needed\r\n```", "diff_hunk": "@@ -0,0 +1,232 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.optim import AdamW\n+from torch.utils.data import DataLoader\n+\n+# New Code #\n+import evaluate\n+from accelerate import Accelerator, DistributedType\n+from accelerate.utils import find_executable_batch_size\n+from datasets import load_dataset\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to combine both the gradient accumulation\n+# and automatic batch size finder utilities of Accelerate to perfrom\n+# automatic gradient accumulation\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ 
accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+# For testing only\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ from accelerate.test_utils.training import mocked_dataloaders\n+\n+ get_dataloaders = mocked_dataloaders # noqa: F811\n+\n+\n+def training_function(config, args):\n+ # For testing only\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ config[\"num_epochs\"] = 2\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ observed_batch_size = int(config[\"batch_size\"])\n+\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n+\n+ # New Code #\n+ # We use the `find_executable_batch_size` decorator, passing in the desired observed batch size\n+ # to train on. If a CUDA OOM error occurs, it will retry this loop cutting the batch size in\n+ # half each time. From this, we can calcualte the number of gradient accumulation steps needed", "from_author": false } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_742). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/742", "pr_id": 1077418429 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 982293a33..f348bb0a6 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -45,6 +45,7 @@\n )\n from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\n from accelerate.utils.dataclasses import SageMakerDistributedType\n+from accelerate.utils.launch import env_var_path_add\n \n \n if is_rich_available():\n@@ -573,7 +574,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n- current_env[\"PYTHONPATH\"] = sys.executable\n+ current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath(\".\"))\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n current_env[\"USE_DEEPSPEED\"] = \"true\"\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nindex 31d71ad21..8642a441a 100644\n--- a/src/accelerate/utils/launch.py\n+++ b/src/accelerate/utils/launch.py\n@@ -52,6 +52,16 @@ def _filter_args(args):\n return new_args\n \n \n+def env_var_path_add(env_var_name, path_to_add):\n+ \"\"\"\n+ Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the\n+ caller to set it in os.environ.\n+ \"\"\"\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\n+ paths.append(str(path_to_add))\n+ return \":\".join(paths)\n+\n+\n class PrepareForLaunch:\n \"\"\"\n Prepare a function that will launched in a distributed setup.\n", "code_comments": [ { "body": "Let's just write the three lines of code instead of adding a nested function? It will be shorter and as easy to read.", "diff_hunk": "@@ -573,7 +573,13 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n- current_env[\"PYTHONPATH\"] = sys.executable\n+ def env_path_add(env_var_name, new_path):\n+ \"\"\"extend path-based env variable with a new path\"\"\"\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\n+ paths.append(str(new_path))\n+ return \":\".join(paths)\n+\n+ current_env[\"PYTHONPATH\"] = env_path_add(\"PYTHONPATH\", sys.executable)", "from_author": false }, { "body": "as it's likely that at some point you might want to extend other path-based env vars like `PATH` or `LD_LIBRARY_PATH` - one more option is to move it to utils. \r\n\r\nbut if you prefer inlined and @pacman100 is an agreement I will rewrite.", "diff_hunk": "@@ -573,7 +573,13 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n- current_env[\"PYTHONPATH\"] = sys.executable\n+ def env_path_add(env_var_name, new_path):\n+ \"\"\"extend path-based env variable with a new path\"\"\"\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\n+ paths.append(str(new_path))\n+ return \":\".join(paths)\n+\n+ current_env[\"PYTHONPATH\"] = env_path_add(\"PYTHONPATH\", sys.executable)", "from_author": true }, { "body": "I'm fine having it as a util too, just don't like the nested function approach where it can't be reused.", "diff_hunk": "@@ -573,7 +573,13 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n- current_env[\"PYTHONPATH\"] = sys.executable\n+ def env_path_add(env_var_name, new_path):\n+ \"\"\"extend path-based env variable with a new path\"\"\"\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\n+ paths.append(str(new_path))\n+ return \":\".join(paths)\n+\n+ current_env[\"PYTHONPATH\"] = env_path_add(\"PYTHONPATH\", sys.executable)", "from_author": false }, { "body": "pushed the change to move to utils, renamed the helper for better clarity now that it's far away from definition.", "diff_hunk": "@@ -573,7 +573,13 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n- current_env[\"PYTHONPATH\"] = sys.executable\n+ def env_path_add(env_var_name, new_path):\n+ \"\"\"extend path-based env variable with a new path\"\"\"\n+ paths = [p for p in os.environ.get(env_var_name, \"\").split(\":\") if len(p) > 0]\n+ paths.append(str(new_path))\n+ return \":\".join(paths)\n+\n+ current_env[\"PYTHONPATH\"] = env_path_add(\"PYTHONPATH\", sys.executable)", "from_author": true }, { "body": "```suggestion\r\n current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", os.path.abspath('.'))\r\n```", "diff_hunk": "@@ -573,7 +574,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n- current_env[\"PYTHONPATH\"] = sys.executable\n+ current_env[\"PYTHONPATH\"] = env_var_path_add(\"PYTHONPATH\", sys.executable)", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Oops, Thanks a lot for pointing this out! 
Post the above PR, a sample run is shown below:\r\n```\r\n>>> export PYTHONPATH=\"sample_dir/fancy_library\"\r\n...\r\n\r\n>>> accelerate launch --config_file ds_mn_hf.yaml accelerate/examples/complete_nlp_example.py --mixed_precision \"fp16\"\r\n[2022-10-06 12:00:16,739] [INFO] [runner.py:504:main] cmd = pdsh -S -f 1024 -w localhost,sourab_huggingface_co@sourab-vm-image-1 \r\nexport PYTHONPATH=sample_dir/fancy_library:/home/sourab_huggingface_co;\r\n\r\n...\r\n\r\nexport MIXED_PRECISION=fp16; export USE_DEEPSPEED=true; export DEEPSPEED_ZERO_STAGE=2; export GRADIENT_ACCUMULATION_STEPS=1; export GRADIENT_CLIPPING=1.0; export DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE=none; export DEEPSPEED_OFFLOAD_PARAM_DEVICE=none; export DEEPSPEED_ZERO3_INIT=false; export DEEPSPEED_ZERO3_SAVE_16BIT_MODEL=none; cd /home/sourab_huggingface_co; /opt/conda/envs/ml/bin/python3.9 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMF0sICJzb3VyYWJfaHVnZ2luZ2ZhY2VfY29Ac291cmFiLXZtLWltYWdlLTEiOiBbMF19 --node_rank=%n --master_addr=xx.xxx.x.x --master_port=29500 --no_local_rank accelerate/examples/complete_nlp_example.py --mixed_precision 'fp16'\r\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:136:main] WORLD INFO DICT: {'localhost': [0], 'sourab_huggingface_co@sourab-vm-image-1': [0]}\r\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:142:main] nnodes=2, num_local_procs=1, node_rank=0\r\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:155:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0], 'sourab_huggingface_co@sourab-vm-image-1': [1]})\r\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:156:main] dist_world_size=2\r\nlocalhost: [2022-10-06 12:00:22,720] [INFO] [launch.py:158:main] Setting CUDA_VISIBLE_DEVICES=0\r\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:136:main] WORLD INFO DICT: {'localhost': [0], 'sourab_huggingface_co@sourab-vm-image-1': [0]}\r\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:142:main] nnodes=2, num_local_procs=1, node_rank=1\r\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:155:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0], 'sourab_huggingface_co@sourab-vm-image-1': [1]})\r\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:156:main] dist_world_size=2\r\nsourab-vm-image-1: [2022-10-06 12:00:23,404] [INFO] [launch.py:158:main] Setting CUDA_VISIBLE_DEVICES=0\r\nlocalhost: [2022-10-06 12:00:27,932] [INFO] [comm.py:633:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl\r\n\r\n...\r\n\r\nlocalhost: [2022-10-06 12:01:10,571] [INFO] [config.py:976:print_user_config] json = {\r\nlocalhost: \"train_batch_size\": 32, \r\nlocalhost: \"train_micro_batch_size_per_gpu\": 16, \r\nlocalhost: \"gradient_accumulation_steps\": 1, \r\nlocalhost: \"zero_optimization\": {\r\nlocalhost: \"stage\": 2, \r\nlocalhost: \"offload_optimizer\": {\r\nlocalhost: \"device\": \"none\"\r\nlocalhost: }, \r\nlocalhost: \"offload_param\": {\r\nlocalhost: \"device\": \"none\"\r\nlocalhost: }, \r\nlocalhost: \"stage3_gather_16bit_weights_on_model_save\": false\r\nlocalhost: }, \r\nlocalhost: \"gradient_clipping\": 1.0, \r\nlocalhost: \"steps_per_print\": inf, \r\nlocalhost: \"fp16\": {\r\nlocalhost: \"enabled\": true, \r\nlocalhost: \"auto_cast\": true\r\nlocalhost: }, \r\nlocalhost: \"zero_allow_untested_optimizer\": true\r\nlocalhost: }\r\nlocalhost: Using /home/sourab_huggingface_co/.cache/torch_extensions/py39_cu113 as PyTorch extensions 
root...\r\n\r\n...\r\n\r\nlocalhost: epoch 0: {'accuracy': 0.7303921568627451, 'f1': 0.8318042813455658}\r\nsourab-vm-image-1: epoch 0: {'accuracy': 0.7303921568627451, 'f1': 0.8318042813455658}\r\nlocalhost: epoch 1: {'accuracy': 0.8308823529411765, 'f1': 0.880415944540728}\r\nsourab-vm-image-1: epoch 1: {'accuracy': 0.8308823529411765, 'f1': 0.880415944540728}\r\nlocalhost: epoch 2: {'accuracy': 0.8333333333333334, 'f1': 0.8811188811188811}\r\nsourab-vm-image-1: epoch 2: {'accuracy': 0.8333333333333334, 'f1': 0.8811188811188811}\r\n\r\n```", "from_author": false }, { "body": "@pacman100, please feel free to merge when you feel it's ready. Thanks.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/741", "pr_id": 1076498274 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 5d8ff0c1b..593eec741 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -6,6 +6,7 @@ on:\n - \"src/**\"\n - \"tests/**\"\n - \".github/**\"\n+ - \"examples/**\"\n - \"setup.py\"\n types: [opened, synchronize, reopened]\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/740", "pr_id": 1076433990 }, { "diff": "diff --git a/docs/source/usage_guides/memory.mdx b/docs/source/usage_guides/memory.mdx\nindex 8841cd584..213a2f670 100644\n--- a/docs/source/usage_guides/memory.mdx\n+++ b/docs/source/usage_guides/memory.mdx\n@@ -25,16 +25,20 @@ training script. To use it, restructure your training function to include an inn\n and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code. \n > Note: The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. 
The wrapper handles this for us\n \n+It should also be noted that anything which will consume CUDA memory and passed to the `accelerator` **must** be declared inside the inner function,\n+such as models and optimizers.\n+\n ```diff\n def training_function(args):\n accelerator = Accelerator()\n- model = get_model()\n- model.to(accelerator.device)\n- optimizer = get_optimizer()\n \n + @find_executable_batch_size(starting_batch_size=args.batch_size)\n + def inner_training_loop(batch_size):\n-+ nonlocal model, optimizer # Ensure they can be used in our context\n++ nonlocal accelerator # Ensure they can be used in our context\n++ accelerator.free_memory() # Free all lingering references\n+ model = get_model()\n+ model.to(accelerator.device)\n+ optimizer = get_optimizer()\n train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n lr_scheduler = get_scheduler(\n optimizer, \ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 82dc34b8a..684a32853 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -122,24 +122,6 @@ def training_function(config, args):\n \n metric = evaluate.load(\"glue\", \"mrpc\")\n \n- # If the batch size is too big we use gradient accumulation\n- gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n- gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n- batch_size = MAX_GPU_BATCH_SIZE\n-\n- set_seed(seed)\n- # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n- model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n-\n- # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n- # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n- # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n- model = model.to(accelerator.device)\n-\n- # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr)\n-\n # New Code #\n # We now can define an inner training loop function. 
It should take a batch size as the only parameter,\n # and build the dataloaders in there.\n@@ -147,16 +129,31 @@ def training_function(config, args):\n @find_executable_batch_size(starting_batch_size=batch_size)\n def inner_training_loop(batch_size):\n # And now just move everything below under this function\n- # Ensure that anything declared outside this function is set as `nonlocal`\n- # so it is in scope\n- nonlocal model, optimizer\n+ # We need to bring in the Accelerator object from earlier\n+ nonlocal accelerator\n+ # And reset all of its attributes that could hold onto any memory:\n+ accelerator.free_memory()\n+\n+ # Then we can declare the model, optimizer, and everything else:\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n \n # Instantiate scheduler\n lr_scheduler = get_linear_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=100,\n- num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ num_training_steps=(len(train_dataloader) * num_epochs),\n )\n \n # Prepare everything\n@@ -174,12 +171,10 @@ def inner_training_loop(batch_size):\n batch.to(accelerator.device)\n outputs = model(**batch)\n loss = outputs.loss\n- loss = loss / gradient_accumulation_steps\n accelerator.backward(loss)\n- if step % gradient_accumulation_steps == 0:\n- optimizer.step()\n- lr_scheduler.step()\n- optimizer.zero_grad()\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n \n model.eval()\n for step, batch in enumerate(eval_dataloader):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/739", "pr_id": 1076432519 }, { "diff": "diff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\nindex d0510bf93..b3fff25e3 100644\n--- a/docs/source/usage_guides/training_zoo.mdx\n+++ b/docs/source/usage_guides/training_zoo.mdx\n@@ -100,3 +100,7 @@ These are tutorials from libraries that integrate with πŸ€— Accelerate:\n ### Tez \n \n - [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)\n+\n+### trlx \n+\n+- [How to implement a sentiment learning task with trlx](https://github.com/CarperAI/trlx#example-how-to-add-a-task)\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/738", "pr_id": 1076041429 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 84c4e03f6..559c798c5 100644\n--- 
a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -32,9 +32,11 @@\n - local: usage_guides/memory\n title: How to avoid CUDA Out-of-Memory\n - local: usage_guides/sagemaker\n- title: Using Accelerate on SageMaker\n+ title: Using πŸ€— Accelerate on SageMaker\n - local: usage_guides/mps\n title: How to use Apple Silicon M1 GPUs\n+ - local: usage_guides/training_zoo\n+ title: πŸ€— Accelerate Example Zoo\n title: How-To Guides\n - sections:\n - local: concept_guides/gradient_synchronization\ndiff --git a/docs/source/usage_guides/training_zoo.mdx b/docs/source/usage_guides/training_zoo.mdx\nnew file mode 100644\nindex 000000000..d0510bf93\n--- /dev/null\n+++ b/docs/source/usage_guides/training_zoo.mdx\n@@ -0,0 +1,102 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Example Zoo\n+\n+Below contains a non-exhuastive list of tutorials and scripts showcasing Accelerate\n+\n+## Official Accelerate Examples:\n+\n+### Basic Examples\n+\n+These examples showcase the base features of Accelerate and are a great starting point\n+\n+- [Barebones NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py)\n+- [Barebones computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/cv_example.py)\n+\n+### Feature Specific Examples\n+\n+These examples showcase specific features that the Accelerate framework offers\n+\n+- [Checkpointing states](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py)\n+- [Cross validation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/cross_validation.py)\n+- [DeepSpeed](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/deepspeed_with_config_support.py)\n+- [Fully Sharded Data Parallelism](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/fsdp_with_peak_mem_tracking.py)\n+- [Gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation.py)\n+- [Memory-aware batch size finder](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/memory.py)\n+- [Metric Computation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/multi_process_metrics.py)\n+- [Using Trackers](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py)\n+\n+### Full Examples \n+\n+These examples showcase every feature in Accelerate at once that was shown in \"Feature Specific Examples\"\n+\n+- [Complete NLP example](https://github.com/huggingface/accelerate/blob/main/examples/complete_nlp_example.py)\n+- [Complete computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/complete_cv_example.py)\n+- [Causal language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm_no_trainer.py)\n+- [Masked language model fine-tuning 
example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_no_trainer.py)\n+- [Speech pretraining example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py)\n+- [Translation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py)\n+- [Text classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue_no_trainer.py)\n+- [Semantic segmentation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py)\n+- [Question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_no_trainer.py)\n+- [Beam search question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py)\n+- [Multiple choice question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/multiple-choice/run_swag_no_trainer.py)\n+- [Named entity recognition fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py)\n+- [Image classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py)\n+- [Summarization fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py)\n+\n+## Integration Examples \n+\n+These are tutorials from libraries that integrate with πŸ€— Accelerate: \n+\n+### Catalyst\n+\n+- [Distributed training tutorial with Catalyst](https://catalyst-team.github.io/catalyst/tutorials/ddp.html)\n+\n+### DALLE2-pytorch \n+\n+- [Fine-tuning DALLE2](https://github.com/lucidrains/DALLE2-pytorch#usage)\n+\n+### πŸ€— diffusers\n+\n+- [Performing textual inversion with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)\n+- [Training DreamBooth with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)\n+\n+### fastai \n+\n+- [Distributed training from Jupyter Notebooks with fastai](https://docs.fast.ai/tutorial.distributed.html)\n+- [Basic distributed training examples with fastai](https://docs.fast.ai/examples/distributed_app_examples.html)\n+\n+### GradsFlow\n+\n+- [Auto Image Classification with GradsFlow](https://docs.gradsflow.com/en/latest/examples/nbs/01-ImageClassification/)\n+\n+### imagen-pytorch \n+\n+- [Fine-tuning Imagen](https://github.com/lucidrains/imagen-pytorch#usage)\n+\n+### Kornia\n+\n+- [Fine-tuning vision models with Kornia's Trainer](https://kornia.readthedocs.io/en/latest/get-started/training.html)\n+\n+### PyTorch Accelerated \n+\n+- [Quickstart distributed training tutorial with PyTorch Accelerated](https://pytorch-accelerated.readthedocs.io/en/latest/quickstart.html)\n+\n+### PyTorch3D\n+\n+- [Perform Deep Learning with 3D data](https://pytorch3d.org/tutorials/)\n+\n+### Tez \n+\n+- [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)\n", "code_comments": [], "context": [ { "body": "_The documentation is not 
available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/737", "pr_id": 1075007649 }, { "diff": "diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml\nindex 69a101307..3ed39ac4b 100644\n--- a/.github/ISSUE_TEMPLATE/bug-report.yml\n+++ b/.github/ISSUE_TEMPLATE/bug-report.yml\n@@ -1,6 +1,5 @@\n name: \"\\U0001F41B Bug Report\"\n description: Submit a bug report to help us improve Accelerate\n-labels: [ \"bug\" ]\n body:\n - type: textarea\n id: system-info\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/735", "pr_id": 1074942681 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 84c4e03f6..ab24fe8e5 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -37,6 +37,8 @@\n title: How to use Apple Silicon M1 GPUs\n title: How-To Guides\n - sections:\n+ - local: concept_guides/performance\n+ title: Comparing performance across distributed setups\n - local: concept_guides/gradient_synchronization\n title: Gradient synchronization\n - local: concept_guides/deferring_execution\ndiff --git a/docs/source/concept_guides/deferring_execution.mdx b/docs/source/concept_guides/deferring_execution.mdx\nindex 4297e2567..cb80ee0da 100644\n--- a/docs/source/concept_guides/deferring_execution.mdx\n+++ b/docs/source/concept_guides/deferring_execution.mdx\n@@ -1,3 +1,15 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n # Deferring Executions\n \n When you run your usual script, instructions are executed in order. Using πŸ€— Accelerate to deploy your script on several\ndiff --git a/docs/source/concept_guides/performance.mdx b/docs/source/concept_guides/performance.mdx\nnew file mode 100644\nindex 000000000..c974b322f\n--- /dev/null\n+++ b/docs/source/concept_guides/performance.mdx\n@@ -0,0 +1,91 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Comparing performance between different device setups\n+\n+Evaluating and comparing the performance from different setups can be quite tricky if you don't know what to look for.\n+For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate \n+and expect your results to line up. \n+\n+But why?\n+\n+There's three reasons for this that this tutorial will cover: \n+\n+1. **Setting the right seeds**\n+2. **Observed Batch Sizes**\n+3. **Learning Rates**\n+\n+## Setting the Seed \n+\n+While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducable:\n+\n+```python\n+from accelerate import set_seed\n+\n+set_seed(42)\n+```\n+\n+Why is this important? Under the hood this will set **5** different seed settings:\n+\n+```python\n+ random.seed(seed)\n+ np.random.seed(seed)\n+ torch.manual_seed(seed)\n+ torch.cuda.manual_seed_all(seed)\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ xm.set_rng_state(seed)\n+```\n+\n+The random state, numpy's state, torch, torch's cuda state, and if TPUs are available torch_xla's cuda state.\n+\n+## Observed Batch Sizes \n+\n+When training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. What this entails is \n+a batch size of 64 on two GPUs is truly a batch size of 128. As a result, when testing on a single GPU this needs to be accounted for,\n+as well as similarly for TPUs. \n+\n+The below table can be used as a quick reference to try out different batch sizes:\n+\n+<Tip>\n+\n+In this example there are two GPUs for \"Multi-GPU\" and a TPU pod with 8 workers\n+\n+</Tip>\n+\n+| Single GPU Batch Size | Multi-GPU Equivalent Batch Size | TPU Equivalent Batch Size |\n+|-----------------------|---------------------------------|---------------------------|\n+| 256 | 128 | 32 |\n+| 128 | 64 | 16 |\n+| 64 | 32 | 8 |\n+| 32 | 16 | 4 |\n+\n+## Learning Rates \n+\n+As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. The below \n+snippet shows doing so with Accelerate:\n+\n+<Tip>\n+\n+Since users can have their own learning rate schedulers defined, we leave this up to the user to decide if they wish to scale their \n+learning rate or not.\n+ \n+</Tip>\n+\n+```python\n+learning_rate = 1e-3\n+accelerator = Accelerator()\n+learning_rate *= accelerator.num_processes\n+\n+optimizer = AdamW(params=model.parameters(), lr=learning_rate)\n+```\n+\n", "code_comments": [ { "body": "```suggestion\r\n<Tip>\r\n\r\nIn this example there are two GPUs for \"Multi-GPU\" and a TPU pod with 8 workers\r\n\r\n</Tip>\r\n```", "diff_hunk": "@@ -0,0 +1,87 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Comparing performance between different device setups\n+\n+Evaluating and comparing the performance from different setups can be quite tricky if you don't know what to look for.\n+For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate \n+and expect your results to line up. \n+\n+But why?\n+\n+There's three reasons for this that this tutorial will cover: \n+\n+1. **Setting the right seeds**\n+2. **Observed Batch Sizes**\n+3. **Learning Rates**\n+\n+## Setting the Seed \n+\n+While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducable:\n+\n+```python\n+from accelerate import set_seed\n+\n+set_seed(42)\n+```\n+\n+Why is this important? Under the hood this will set **5** different seed settings:\n+\n+```python\n+ random.seed(seed)\n+ np.random.seed(seed)\n+ torch.manual_seed(seed)\n+ torch.cuda.manual_seed_all(seed)\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ xm.set_rng_state(seed)\n+```\n+\n+The random state, numpy's state, torch, torch's cuda state, and if TPUs are available torch_xla's cuda state.\n+\n+## Observed Batch Sizes \n+\n+When training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. What this entails is \n+a batch size of 64 on two GPUs is truly a batch size of 128. As a result, when testing on a single GPU this needs to be accounted for,\n+as well as similarly for TPUs. \n+\n+The below table can be used as a quick reference to try out different batch sizes:\n+\n+<Tip>\n+ In this example there are two GPUs for \"Multi-GPU\" and a TPU pod with 8 workers\n+</Tip>", "from_author": false }, { "body": "```suggestion\r\n<Tip>\r\n\r\nSince users can have their own learning rate schedulers defined, we leave this up to the user to decide if they wish to scale their \r\nlearning rate or not.\r\n \r\n</Tip>\r\n```", "diff_hunk": "@@ -0,0 +1,87 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Comparing performance between different device setups\n+\n+Evaluating and comparing the performance from different setups can be quite tricky if you don't know what to look for.\n+For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate \n+and expect your results to line up. \n+\n+But why?\n+\n+There's three reasons for this that this tutorial will cover: \n+\n+1. **Setting the right seeds**\n+2. 
**Observed Batch Sizes**\n+3. **Learning Rates**\n+\n+## Setting the Seed \n+\n+While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducable:\n+\n+```python\n+from accelerate import set_seed\n+\n+set_seed(42)\n+```\n+\n+Why is this important? Under the hood this will set **5** different seed settings:\n+\n+```python\n+ random.seed(seed)\n+ np.random.seed(seed)\n+ torch.manual_seed(seed)\n+ torch.cuda.manual_seed_all(seed)\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ xm.set_rng_state(seed)\n+```\n+\n+The random state, numpy's state, torch, torch's cuda state, and if TPUs are available torch_xla's cuda state.\n+\n+## Observed Batch Sizes \n+\n+When training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. What this entails is \n+a batch size of 64 on two GPUs is truly a batch size of 128. As a result, when testing on a single GPU this needs to be accounted for,\n+as well as similarly for TPUs. \n+\n+The below table can be used as a quick reference to try out different batch sizes:\n+\n+<Tip>\n+ In this example there are two GPUs for \"Multi-GPU\" and a TPU pod with 8 workers\n+</Tip>\n+\n+| Single GPU Batch Size | Multi-GPU Equivalent Batch Size | TPU Equivalent Batch Size |\n+|-----------------------|---------------------------------|---------------------------|\n+| 256 | 128 | 32 |\n+| 128 | 64 | 16 |\n+| 64 | 32 | 8 |\n+| 32 | 16 | 4 |\n+\n+## Learning Rates \n+\n+As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. The below \n+snippet shows doing so with Accelerate:\n+\n+<Tip>\n+ Since users can have their own learning rate schedulers defined, we leave this up to the user to decide if they wish to scale their \n+ learning rate or not.\n+</Tip>", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/734", "pr_id": 1074934430 }, { "diff": "diff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nindex 092b72850..a9e955d43 100644\n--- a/docs/source/package_reference/cli.mdx\n+++ b/docs/source/package_reference/cli.mdx\n@@ -94,6 +94,7 @@ The following arguments are useful for customization of worker machines\n * `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.\n * `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\n * `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.\n+* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list\n * `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.\n * `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.\n * `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. 
Can be tuned for optimal performance.\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex a174115b1..01c419f4f 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -35,6 +35,7 @@ def get_cluster_input():\n \n machine_rank = 0\n num_machines = 1\n+ gpu_ids = None\n main_process_ip = None\n main_process_port = None\n rdzv_backend = \"static\"\n@@ -293,6 +294,12 @@ def get_cluster_input():\n default=1,\n error_message=\"Please enter an integer.\",\n )\n+\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu:\n+ gpu_ids = _ask_field(\n+ \"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:\",\n+ default=\"all\",\n+ )\n elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED]:\n num_processes = _ask_field(\n \"How many GPU(s) should be used for distributed training? [1]:\",\n@@ -325,6 +332,7 @@ def get_cluster_input():\n compute_environment=ComputeEnvironment.LOCAL_MACHINE,\n distributed_type=distributed_type,\n num_processes=num_processes,\n+ gpu_ids=gpu_ids,\n mixed_precision=mixed_precision,\n downcast_bf16=downcast_bf16,\n machine_rank=machine_rank,\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 68da64963..43faf45ca 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -135,6 +135,7 @@ class ClusterConfig(BaseConfig):\n num_processes: int\n machine_rank: int = 0\n num_machines: int = 1\n+ gpu_ids: Optional[str] = None\n main_process_ip: Optional[str] = None\n main_process_port: Optional[int] = None\n rdzv_backend: Optional[str] = \"static\"\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex d1905d1c0..982293a33 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -259,6 +259,11 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--num_machines\", type=int, default=None, help=\"The total number of machines used in this training.\"\n )\n+ parser.add_argument(\n+ \"--gpu_ids\",\n+ default=None,\n+ help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\n+ )\n parser.add_argument(\n \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\n )\n@@ -366,6 +371,8 @@ def simple_launcher(args):\n current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\n if args.use_mps_device:\n current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n+ elif args.gpu_ids != \"all\":\n+ current_env[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\n if args.num_machines > 1:\n current_env[\"MASTER_ADDR\"] = args.main_process_ip\n current_env[\"MASTER_PORT\"] = str(args.main_process_port)\n@@ -420,6 +427,9 @@ def multi_gpu_launcher(args):\n setattr(args, \"no_python\", True)\n \n current_env = os.environ.copy()\n+ gpu_ids = getattr(args, \"gpu_ids\")\n+ if gpu_ids != \"all\":\n+ current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\n mixed_precision = args.mixed_precision.lower()\n try:\n mixed_precision = PrecisionType(mixed_precision)\n@@ -549,6 +559,9 @@ def deepspeed_launcher(args):\n setattr(args, \"no_python\", True)\n \n current_env = os.environ.copy()\n+ gpu_ids = getattr(args, \"gpu_ids\")\n+ if gpu_ids != \"all\":\n+ current_env[\"CUDA_VISIBLE_DEVICES\"] = gpu_ids\n try:\n mixed_precision = 
PrecisionType(args.mixed_precision.lower())\n except ValueError:\n@@ -817,6 +830,14 @@ def launch_command(args):\n args.tpu = defaults.distributed_type == DistributedType.TPU\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\n+ if not args.use_mps_device:\n+ if args.gpu_ids is None:\n+ if defaults.gpu_ids is not None:\n+ args.gpu_ids = defaults.gpu_ids\n+ else:\n+ args.gpu_ids = \"all\"\n+ if len(args.gpu_ids.split(\",\")) < 2 and args.multi_gpu and (args.gpu_ids != \"all\"):\n+ args.multi_gpu = False\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n # Update args with the defaults\n for name, attr in defaults.__dict__.items():\n", "code_comments": [ { "body": "I meant with respect to these changes\n", "diff_hunk": "@@ -817,6 +830,13 @@ def launch_command(args):\n args.tpu = defaults.distributed_type == DistributedType.TPU\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\n+ if args.gpu_ids is None:\n+ if defaults.gpu_ids is not None:\n+ args.gpu_ids = defaults.gpu_ids\n+ else:\n+ args.gpu_ids = \"all\"\n+ if len(args.gpu_ids.split(\",\")) < 2 and args.multi_gpu and (args.gpu_ids != \"all\"):\n+ args.multi_gpu = False", "from_author": false }, { "body": "Let me know what you think about my solution here: https://github.com/huggingface/accelerate/pull/732/commits/0467d4ff0bacda7ffb0d6087a7ef636cfbbb4960", "diff_hunk": "@@ -817,6 +830,13 @@ def launch_command(args):\n args.tpu = defaults.distributed_type == DistributedType.TPU\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\n+ if args.gpu_ids is None:\n+ if defaults.gpu_ids is not None:\n+ args.gpu_ids = defaults.gpu_ids\n+ else:\n+ args.gpu_ids = \"all\"\n+ if len(args.gpu_ids.split(\",\")) < 2 and args.multi_gpu and (args.gpu_ids != \"all\"):\n+ args.multi_gpu = False", "from_author": true }, { "body": "Nice! This should have expected behaviour. 
πŸ€—", "diff_hunk": "@@ -817,6 +830,13 @@ def launch_command(args):\n args.tpu = defaults.distributed_type == DistributedType.TPU\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\n+ if args.gpu_ids is None:\n+ if defaults.gpu_ids is not None:\n+ args.gpu_ids = defaults.gpu_ids\n+ else:\n+ args.gpu_ids = \"all\"\n+ if len(args.gpu_ids.split(\",\")) < 2 and args.multi_gpu and (args.gpu_ids != \"all\"):\n+ args.multi_gpu = False", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@pacman100 correct, this is solely for non-M1 GPUs, I'll specify that in the info description for it", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/732", "pr_id": 1072847345 }, { "diff": "diff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\nindex 4ae91a657..835d4e0d9 100644\n--- a/src/accelerate/scheduler.py\n+++ b/src/accelerate/scheduler.py\n@@ -69,7 +69,9 @@ def step(self, *args, **kwargs):\n num_processes = AcceleratorState().num_processes\n for _ in range(num_processes):\n # Special case when using OneCycle and `drop_last` was not used\n- if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:\n+ if hasattr(self.scheduler, \"total_steps\") and self.scheduler._step_count <= self.scheduler.total_steps:\n+ self.scheduler.step(*args, **kwargs)\n+ else:\n self.scheduler.step(*args, **kwargs)\n \n # Passthroughs\ndiff --git a/tests/test_scheduler.py b/tests/test_scheduler.py\nindex be4f975fb..c1ef18f1e 100644\n--- a/tests/test_scheduler.py\n+++ b/tests/test_scheduler.py\n@@ -21,12 +21,30 @@\n from accelerate.test_utils import require_cpu\n \n \n-def scheduler_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\n+def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\n accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)\n model = torch.nn.Linear(2, 4)\n optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\n- scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)\n+ scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)\n+ model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)\n \n+ # Optimizer has stepped\n+ scheduler.step()\n+ if step_scheduler_with_optimizer or (num_processes == 1):\n+ assert (\n+ scheduler.scheduler.last_epoch == num_processes\n+ ), f\"Last Epoch ({scheduler.scheduler.last_epoch}) != Num Processes ({num_processes})\"\n+ else:\n+ assert (\n+ scheduler.scheduler.last_epoch != num_processes\n+ ), f\"Last Epoch ({scheduler.scheduler.last_epoch}) == Num Processes ({num_processes})\"\n+\n+\n+def lambda_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\n+ accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)\n+ model = torch.nn.Linear(2, 4)\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\n+ scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)\n model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)\n \n # Optimizer has stepped\n@@ -49,16 +67,30 @@ def scheduler_test(num_processes=2, step_scheduler_with_optimizer=True, 
split_ba\n \n @require_cpu\n class SchedulerTester(unittest.TestCase):\n- def test_scheduler_steps_with_optimizer_single_process(self):\n- debug_launcher(partial(scheduler_test, num_processes=1), num_processes=1)\n- debug_launcher(partial(scheduler_test, num_processes=1, split_batches=True), num_processes=1)\n+ def test_lambda_scheduler_steps_with_optimizer_single_process(self):\n+ debug_launcher(partial(lambda_test, num_processes=1), num_processes=1)\n+ debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)\n+\n+ def test_one_cycle_scheduler_steps_with_optimizer_single_process(self):\n+ debug_launcher(partial(one_cycle_test, num_processes=1), num_processes=1)\n+ debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)\n+\n+ def test_lambda_scheduler_not_step_with_optimizer_single_process(self):\n+ debug_launcher(partial(lambda_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)\n+\n+ def test_one_cycle_scheduler_not_step_with_optimizer_single_process(self):\n+ debug_launcher(partial(one_cycle_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)\n+\n+ def test_lambda_scheduler_steps_with_optimizer_multiprocess(self):\n+ debug_launcher(lambda_test)\n+ debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)\n \n- def test_scheduler_not_step_with_optimizer_single_process(self):\n- debug_launcher(partial(scheduler_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)\n+ def test_one_cycle_scheduler_steps_with_optimizer_multiprocess(self):\n+ debug_launcher(one_cycle_test)\n+ debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)\n \n- def test_scheduler_steps_with_optimizer_multiprocess(self):\n- debug_launcher(scheduler_test)\n- debug_launcher(partial(scheduler_test, num_processes=1, split_batches=True), num_processes=1)\n+ def test_lambda_scheduler_not_step_with_optimizer_multiprocess(self):\n+ debug_launcher(partial(lambda_test, step_scheduler_with_optimizer=False))\n \n- def test_scheduler_not_step_with_optimizer_multiprocess(self):\n- debug_launcher(partial(scheduler_test, step_scheduler_with_optimizer=False))\n+ def test_one_cycle_scheduler_not_step_with_optimizer_multiprocess(self):\n+ debug_launcher(partial(one_cycle_test, step_scheduler_with_optimizer=False))\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/728", "pr_id": 1070510180 }, { "diff": "diff --git a/.github/workflows/build_and_run_tests.yml b/.github/workflows/build_and_run_tests.yml\nindex a5378353d..abf9f3883 100644\n--- a/.github/workflows/build_and_run_tests.yml\n+++ b/.github/workflows/build_and_run_tests.yml\n@@ -34,8 +34,8 @@ jobs:\n done\n \n build-docker-containers:\n- needs: check-for-setup\n- if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')\n+ needs: check-for-source\n+ if: (github.event_name == 'push') && (needs.check-for-source.outputs.changed == '1')\n uses: ./.github/workflows/build_docker_images.yml\n secrets: inherit\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/727", "pr_id": 1070245844 }, { "diff": "diff --git 
a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex bf57d11a4..d1905d1c0 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -844,8 +844,8 @@ def launch_command(args):\n args.mixed_precision = defaults.mixed_precision\n else:\n if args.num_processes is None:\n- warned.append(\"\\t`--num_processes` was set to a value of `1`\")\n- args.num_processes = 1\n+ args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1\n+ warned.append(\"\\t`--num_processes` was set to a value of `{args.num_processes}`\")\n if args.num_machines is None:\n warned.append(\"\\t`--num_machines` was set to a value of `1`\")\n args.num_machines = 1\n@@ -854,14 +854,6 @@ def launch_command(args):\n args.mixed_precision = \"no\"\n if not hasattr(args, \"use_cpu\"):\n args.use_cpu = args.cpu\n- if args.multi_gpu and args.num_processes == 1:\n- args.num_processes = torch.cuda.device_count()\n- if not any(\"--num_processes\" in warn for warn in warned):\n- warned.append(f\"\\t`--num_processes` was set to `{args.num_processes}`\")\n- else:\n- for i, warn in enumerate(warned):\n- if \"--num_processes\" in warn:\n- warned[i] = warn.replace(\"`1`\", f\"`{args.num_processes}`\")\n \n if args.num_cpu_threads_per_process is None:\n local_size = get_int_from_env(\n", "code_comments": [ { "body": "```suggestion\r\n args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1\r\n warned.append(\"\\t`--num_processes` was set to a value of `{args.num_processes}`\")\r\n```", "diff_hunk": "@@ -845,7 +845,7 @@ def launch_command(args):\n else:\n if args.num_processes is None:\n warned.append(\"\\t`--num_processes` was set to a value of `1`\")", "from_author": false }, { "body": "```suggestion\r\n```", "diff_hunk": "@@ -845,7 +845,7 @@ def launch_command(args):\n else:\n if args.num_processes is None:\n warned.append(\"\\t`--num_processes` was set to a value of `1`\")\n- args.num_processes = 1\n+ args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/726", "pr_id": 1069149049 }, { "diff": "diff --git a/.github/workflows/build-docker-images-release.yml b/.github/workflows/build-docker-images-release.yml\nnew file mode 100644\nindex 000000000..654259f87\n--- /dev/null\n+++ b/.github/workflows/build-docker-images-release.yml\n@@ -0,0 +1,64 @@\n+name: Build Docker images (releases)\n+\n+on:\n+ workflow_dispatch:\n+ release:\n+ types: [published]\n+\n+concurrency:\n+ group: docker-image-builds\n+ cancel-in-progress: false\n+\n+jobs:\n+ get-version:\n+ runs-on: ubuntu-latest\n+ outputs:\n+ version: ${{ steps.step1.outputs.version }}\n+ steps:\n+ - uses: actions/checkout@v3\n+ - id: step1\n+ run: echo \"::set-output name=version::$(python setup.py --version)\"\n+\n+ version-cpu:\n+ name: \"Latest Accelerate CPU [version]\"\n+ runs-on: ubuntu-latest\n+ needs: get-version\n+ steps:\n+ - name: Set up Docker Buildx\n+ uses: docker/setup-buildx-action@v1\n+ - name: Check out code\n+ uses: actions/checkout@v2\n+ - name: Login to DockerHub\n+ uses: docker/login-action@v1\n+ with:\n+ username: ${{ secrets.DOCKERHUB_USERNAME }}\n+ password: ${{ secrets.DOCKERHUB_PASSWORD }}\n+\n+ - name: Build and Push CPU\n+ uses: docker/build-push-action@v2\n+ with:\n+ context: ./docker/accelerate-cpu\n+ push: true\n+ tags: 
huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}\n+\n+ version-cuda:\n+ name: \"Latest Accelerate GPU [version]\"\n+ runs-on: ubuntu-latest\n+ needs: get-version\n+ steps:\n+ - name: Set up Docker Buildx\n+ uses: docker/setup-buildx-action@v1\n+ - name: Check out code\n+ uses: actions/checkout@v2\n+ - name: Login to DockerHub\n+ uses: docker/login-action@v1\n+ with:\n+ username: ${{ secrets.DOCKERHUB_USERNAME }}\n+ password: ${{ secrets.DOCKERHUB_PASSWORD }}\n+\n+ - name: Build and Push GPU\n+ uses: docker/build-push-action@v2\n+ with:\n+ context: ./docker/accelerate-gpu\n+ push: true\n+ tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "> FFT, should we just merge these two release workflows together and check if 'dev' is in the version to automatically tag the `latest` tag? Or maybe just publish the prerelease tags? WDYT?\r\n\r\nI guess you are talking about the existing `build_docker_images.yml` here. I don't have particular opinion about if merging these 2 workflow files or not.", "from_author": false }, { "body": "@ydshieh by other folks, not for testing. We've had people use the latest images already, so made sense to also do releases \r\n\r\nOkay, will think on this and do a follow up most likely with the merge ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/725", "pr_id": 1069096305 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 1eb8ff79d..fac1b50e2 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -347,7 +347,6 @@ def __iter__(self):\n try:\n current_batch = next(dataloader_iter)\n except StopIteration:\n- self.gradient_state._iterate_samples_seen(find_batch_size(current_batch))\n yield\n while True:\n try:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/724", "pr_id": 1068881556 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex ab4b48660..2b6558944 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -619,6 +619,12 @@ def prepare(self, *args, device_placement=None):\n device_placement (`List[bool]`, *optional*):\n Used to customize whether automatic device placement should be performed for each object passed. Needs\n to be a list of the same length as `args`.\n+\n+ <Tip>\n+\n+ You don't need to prepare a model if you only use it for inference without any kind of mixed precision\n+\n+ </Tip>\n \"\"\"\n if device_placement is None:\n device_placement = [None for _ in args]\n@@ -699,7 +705,8 @@ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n \n Args:\n model (`torch.nn.Module`):\n- A PyTorch model to prepare\n+ A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without\n+ any kind of mixed precision\n device_placement (`bool`, *optional*):\n Whether or not to place the model on the proper device. 
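As an aside, a minimal sketch of the behavior the Tip in this diff describes, assuming a toy model used purely for inference without mixed precision (the model and shapes are placeholders):

```python
import torch
from accelerate import Accelerator

# Per the Tip above: for pure inference without mixed precision, moving the
# model to `accelerator.device` is enough and `accelerator.prepare(model)`
# can be skipped.
accelerator = Accelerator()
model = torch.nn.Linear(8, 2).to(accelerator.device)
model.eval()

with torch.no_grad():
    batch = torch.randn(4, 8, device=accelerator.device)
    predictions = model(batch)
```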
Will default to `self.device_placement`.\n \"\"\"\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/722", "pr_id": 1067475067 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex cd74b68e2..ab4b48660 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -547,15 +547,15 @@ def print(self, *args, **kwargs):\n if self.is_local_main_process:\n print(*args, **kwargs)\n \n- def _prepare_one(self, obj, first_pass=False):\n+ def _prepare_one(self, obj, first_pass=False, device_placement=None):\n # First pass of preparation: DataLoader, model, optimizer\n if first_pass:\n if isinstance(obj, torch.utils.data.DataLoader):\n- return self.prepare_data_loader(obj)\n+ return self.prepare_data_loader(obj, device_placement=device_placement)\n elif isinstance(obj, torch.nn.Module):\n- return self.prepare_model(obj)\n+ return self.prepare_model(obj, device_placement=device_placement)\n elif isinstance(obj, torch.optim.Optimizer):\n- optimizer = self.prepare_optimizer(obj)\n+ optimizer = self.prepare_optimizer(obj, device_placement=device_placement)\n return optimizer\n # Second pass of preparation: LR scheduler (which need the full list of optimizers)\n elif isinstance(obj, torch.optim.lr_scheduler._LRScheduler):\n@@ -602,17 +602,33 @@ def _prepare_fsdp(self, *args):\n self._optimizers = optimizers\n return tuple(result)\n \n- def prepare(self, *args):\n+ def prepare(self, *args, device_placement=None):\n \"\"\"\n Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same\n order.\n \n- Accepts the following type of objects:\n+ Args:\n+ *args (list of objects):\n+ Any of the following type of objects:\n+\n+ - `torch.utils.data.DataLoader`: PyTorch Dataloader\n+ - `torch.nn.Module`: PyTorch Module\n+ - `torch.optim.Optimizer`: PyTorch Optimizer\n+ - `torch.optim.lr_scheduler._LRScheduler`: PyTorch LR Scheduler\n \n- - `torch.utils.data.DataLoader`: PyTorch Dataloader\n- - `torch.nn.Module`: PyTorch Module\n- - `torch.optim.Optimizer`: PyTorch Optimizer\n+ device_placement (`List[bool]`, *optional*):\n+ Used to customize whether automatic device placement should be performed for each object passed. Needs\n+ to be a list of the same length as `args`.\n \"\"\"\n+ if device_placement is None:\n+ device_placement = [None for _ in args]\n+ elif self.distributed_type == DistributedType.DEEPSPEED:\n+ raise ValueError(\"You can't customize device placements with DeepSpeed.\")\n+ elif len(device_placement) != len(args):\n+ raise ValueError(\n+ f\"`device_placement` should be a list with {len(args)} elements (the number of objects passed).\"\n+ )\n+\n if self.distributed_type == DistributedType.FSDP:\n model_count = 0\n optimizer_present = False\n@@ -656,8 +672,10 @@ def prepare(self, *args):\n if self.distributed_type == DistributedType.DEEPSPEED:\n result = self._prepare_deepspeed(*args)\n else:\n- result = tuple(self._prepare_one(obj, first_pass=True) for obj in args)\n- result = tuple(self._prepare_one(obj) for obj in result)\n+ result = tuple(\n+ self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)\n+ )\n+ result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))\n \n if tpu_should_fix_optimizer:\n # 2. 
grabbing new model parameters\n@@ -674,7 +692,7 @@ def prepare(self, *args):\n \n return result if len(result) > 1 else result[0]\n \n- def prepare_model(self, model: torch.nn.Module):\n+ def prepare_model(self, model: torch.nn.Module, device_placement=None):\n \"\"\"\n Prepares a PyTorch model for training in any distributed setup. It is recommended to use\n [`Accelerator.prepare`] instead.\n@@ -682,9 +700,13 @@ def prepare_model(self, model: torch.nn.Module):\n Args:\n model (`torch.nn.Module`):\n A PyTorch model to prepare\n+ device_placement (`bool`, *optional*):\n+ Whether or not to place the model on the proper device. Will default to `self.device_placement`.\n \"\"\"\n+ if device_placement is None:\n+ device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP\n self._models.append(model)\n- if self.device_placement and self.distributed_type != DistributedType.FSDP:\n+ if device_placement:\n model = model.to(self.device)\n if self.distributed_type == DistributedType.MULTI_GPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n@@ -894,7 +916,7 @@ def _prepare_deepspeed(self, *args):\n )\n return tuple(result)\n \n- def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader):\n+ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_placement=None):\n \"\"\"\n Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use\n [`Accelerator.prepare`] instead.\n@@ -902,19 +924,24 @@ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader):\n Args:\n data_loader (`torch.utils.data.DataLoader`):\n A vanilla PyTorch DataLoader to prepare\n+ device_placement (`bool`, *optional*):\n+ Whether or not to place the batches on the proper device in the prepared dataloader. Will default to\n+ `self.device_placement`.\n \"\"\"\n+ if device_placement is None:\n+ device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False\n return prepare_data_loader(\n data_loader,\n self.device,\n num_processes=self.num_processes,\n process_index=self.process_index,\n split_batches=self.split_batches,\n- put_on_device=self.device_placement if self.distributed_type != DistributedType.TPU else False,\n+ put_on_device=device_placement,\n rng_types=self.rng_types.copy(),\n dispatch_batches=self.dispatch_batches,\n )\n \n- def prepare_optimizer(self, optimizer: torch.optim.Optimizer):\n+ def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):\n \"\"\"\n Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use\n [`Accelerator.prepare`] instead.\n@@ -922,8 +949,12 @@ def prepare_optimizer(self, optimizer: torch.optim.Optimizer):\n Args:\n optimizer (`torch.optim.Optimizer`):\n A vanilla PyTorch optimizer to prepare\n+ device_placement (`bool`, *optional*):\n+ Whether or not to place the optimizer on the proper device. 
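For orientation, a hedged usage sketch of the per-object `device_placement` argument this diff adds to `prepare`, based on its docstring; the model, optimizer, and dataset here are arbitrary placeholders:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(16, 8)), batch_size=4)

# One boolean per object passed to `prepare`; here the dataloader batches are
# left untouched while the model and optimizer get automatic device placement.
model, optimizer, dataloader = accelerator.prepare(
    model, optimizer, dataloader, device_placement=[True, True, False]
)
```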
Will default to `self.device_placement`.\n \"\"\"\n- optimizer = AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\n+ if device_placement is None:\n+ device_placement = self.device_placement\n+ optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler)\n self._optimizers.append(optimizer)\n return optimizer\n \n", "code_comments": [ { "body": "Perhaps we can be a bit more specific here:\r\n\r\n```suggestion\r\n device_placement (`List[bool]`, *optional*):\r\n Used to customize whether automatic device placement should be performed for each object passed. Needs to be a list of the same length as\r\n `args`.\r\n```", "diff_hunk": "@@ -602,17 +602,32 @@ def _prepare_fsdp(self, *args):\n self._optimizers = optimizers\n return tuple(result)\n \n- def prepare(self, *args):\n+ def prepare(self, *args, device_placement=None):\n \"\"\"\n Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same\n order.\n \n- Accepts the following type of objects:\n+ Args:\n+ *args:\n+ Any of the following type of objects:\n+\n+ - `torch.utils.data.DataLoader`: PyTorch Dataloader\n+ - `torch.nn.Module`: PyTorch Module\n+ - `torch.optim.Optimizer`: PyTorch Optimizer\n+ - `torch.optim.lr_scheduler._LRScheduler`: PyTorch LR Scheduler\n+ device_placement (`List[bool]`, *optional*):\n+ Used to customize the device placement for each object passed. Needs to be a list of the same length as\n+ `args`.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/716", "pr_id": 1065394516 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex bb58ca57f..cd74b68e2 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -674,7 +674,15 @@ def prepare(self, *args):\n \n return result if len(result) > 1 else result[0]\n \n- def prepare_model(self, model):\n+ def prepare_model(self, model: torch.nn.Module):\n+ \"\"\"\n+ Prepares a PyTorch model for training in any distributed setup. It is recommended to use\n+ [`Accelerator.prepare`] instead.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ A PyTorch model to prepare\n+ \"\"\"\n self._models.append(model)\n if self.device_placement and self.distributed_type != DistributedType.FSDP:\n model = model.to(self.device)\n@@ -886,7 +894,15 @@ def _prepare_deepspeed(self, *args):\n )\n return tuple(result)\n \n- def prepare_data_loader(self, data_loader):\n+ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader):\n+ \"\"\"\n+ Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use\n+ [`Accelerator.prepare`] instead.\n+\n+ Args:\n+ data_loader (`torch.utils.data.DataLoader`):\n+ A vanilla PyTorch DataLoader to prepare\n+ \"\"\"\n return prepare_data_loader(\n data_loader,\n self.device,\n@@ -898,12 +914,28 @@ def prepare_data_loader(self, data_loader):\n dispatch_batches=self.dispatch_batches,\n )\n \n- def prepare_optimizer(self, optimizer):\n+ def prepare_optimizer(self, optimizer: torch.optim.Optimizer):\n+ \"\"\"\n+ Prepares a PyTorch Optimizer for training in any distributed setup. 
It is recommended to use\n+ [`Accelerator.prepare`] instead.\n+\n+ Args:\n+ optimizer (`torch.optim.Optimizer`):\n+ A vanilla PyTorch optimizer to prepare\n+ \"\"\"\n optimizer = AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\n self._optimizers.append(optimizer)\n return optimizer\n \n- def prepare_scheduler(self, scheduler):\n+ def prepare_scheduler(self, scheduler: torch.optim.lr_scheduler._LRScheduler):\n+ \"\"\"\n+ Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use\n+ [`Accelerator.prepare`] instead.\n+\n+ Args:\n+ scheduler (`torch.optim.lr_scheduler._LRScheduler`):\n+ A vanilla PyTorch scheduler to prepare\n+ \"\"\"\n # We try to find the optimizer associated with `scheduler`, the default is the full list.\n optimizer = self._optimizers\n for opt in self._optimizers:\n@@ -1133,7 +1165,7 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None, init_k\n Optional starting configuration to be logged.\n init_kwargs (`dict`, *optional*):\n A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be\n- formatted like this:\n+ formatted like so:\n ```python\n {\"wandb\": {\"tags\": [\"tag_a\", \"tag_b\"]}}\n ```\n@@ -1182,7 +1214,7 @@ def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dic\n The run step. If included, the log will be affiliated with this step.\n log_kwargs (`dict`, *optional*):\n A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted\n- like this:\n+ like so:\n ```python\n {\"wandb\": {\"tags\": [\"tag_a\", \"tag_b\"]}}\n ```\n@@ -1193,7 +1225,8 @@ def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dic\n @on_main_process\n def end_training(self):\n \"\"\"\n- Runs any special end training behaviors, such as stopping trackers on the main process only.\n+ Runs any special end training behaviors, such as stopping trackers on the main process only. 
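A short, hedged sketch of the tracking flow these docstrings describe (project name and logged values are invented for illustration):

```python
from accelerate import Accelerator

# `log_with="all"` picks up whichever supported trackers are installed.
accelerator = Accelerator(log_with="all")
accelerator.init_trackers("example_project", config={"learning_rate": 1e-3})

for step in range(10):
    loss = 0.1 * (10 - step)  # stand-in for a real training loss
    accelerator.log({"loss": loss}, step=step)

# Per the docstring above, call this at the end of the script when tracking is used.
accelerator.end_training()
```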
Should always be\n+ called at the end of your script if using experiment tracking.\n \"\"\"\n for tracker in self.trackers:\n tracker.finish()\n@@ -1383,6 +1416,15 @@ def _get_devices(self, *args):\n return (model_device, optimizer_device)\n \n def get_state_dict(self, model, unwrap=True):\n+ \"\"\"\n+ Returns the state dictionary of a model sent through [`Accelerator.prepare`] in full precision\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ A PyTorch model sent through [`Accelerator.prepare`]\n+ unwrap (`bool`, *optional*, defaults to True):\n+ Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict\n+ \"\"\"\n is_zero_3 = False\n if self.distributed_type == DistributedType.DEEPSPEED:\n is_zero_3 = self.deepspeed_config[\"zero_optimization\"][\"stage\"] == 3\n", "code_comments": [ { "body": "I think the other types are all subclasses of `Module`\r\n```suggestion\r\n model (`torch.nn.Module`):\r\n```", "diff_hunk": "@@ -1383,6 +1416,15 @@ def _get_devices(self, *args):\n return (model_device, optimizer_device)\n \n def get_state_dict(self, model, unwrap=True):\n+ \"\"\"\n+ Returns the state dictionary of a model sent through [`Accelerator.prepare`] in full precision\n+\n+ Args:\n+ model (`Module`, `DistributedDataParallel`, `FullyShardedDataParallel`, `MpModelWrapper`):", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/715", "pr_id": 1065343886 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 552c8392c..bb58ca57f 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -125,8 +125,8 @@ class Accelerator:\n - `\"tensorboard\"`\n - `\"wandb\"`\n - `\"comet_ml\"`\n- If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them. Can also\n- accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n+ If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\n+ also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\n A path to a directory for storing logs of locally-compatible loggers.\n dispatch_batches (`bool`, *optional*):\n@@ -643,7 +643,7 @@ def prepare(self, *args):\n \"The model and the optimizer parameters are not on the same device, which probably means you \"\n \"created an optimizer around your model **before** putting on the device. 
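To make the situation behind this error message concrete, a minimal sketch of the recommended ordering (the model and optimizer are placeholders):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()  # device placement left at its default

model = torch.nn.Linear(8, 2)
# Create the optimizer from the model as-is and let `prepare` move both;
# calling model.to(device) *after* building the optimizer is what triggers
# the "not on the same device" message quoted in this diff.
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

model, optimizer = accelerator.prepare(model, optimizer)
```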
Make sure the line \"\n \"model.to(device) is before the optimizer creation in your script or remove it entirely and use \"\n- \"the flag default value for `devicement_placement` in your `Accelerator` to let it handle that \"\n+ \"the flag default value for `device_placement` in your `Accelerator` to let it handle that \"\n \"part for you.\"\n )\n \ndiff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 3c67b6ee5..9d57c144f 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -44,7 +44,7 @@ def init_empty_weights(include_buffers: bool = False):\n \n Example:\n \n- ```pyton\n+ ```python\n import torch.nn as nn\n from accelerate import init_empty_weights\n \n@@ -324,7 +324,7 @@ def load_checkpoint_and_dispatch(\n dtype (`str` or `torch.dtype`, *optional*):\n If provided, the weights will be converted to that type when loaded.\n offload_state_dict (`bool`, *optional*):\n- If `True`, will temporarily offload the CPU state dict on the hard drive to avoig getting out of CPU RAM if\n+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if\n the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map\n picked contains `\"disk\"` values.\n preload_module_classes (`List[str]`, *optional*):\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex ee6d194f1..a438ab45b 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -424,8 +424,8 @@ def filter_trackers(\n - `\"tensorboard\"`\n - `\"wandb\"`\n - `\"comet_ml\"`\n- If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them. Can also\n- accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n+ If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\n+ also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\n A path to a directory for storing logs of locally-compatible loggers.\n \"\"\"\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 467a757b4..0b57ae6d8 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -617,7 +617,7 @@ def load_checkpoint_in_model(\n dtype (`str` or `torch.dtype`, *optional*):\n If provided, the weights will be converted to that type when loaded.\n offload_state_dict (`bool`, *optional*, defaults to `False`):\n- If `True`, will temporarily offload the CPU state dict on the hard drive to avoig getting out of CPU RAM if\n+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if\n the weight of the CPU state dict + the biggest shard does not fit.\n \"\"\"\n if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Hey @sgugger ,\r\n\r\nTrying to run your Makefile has proven really tough for me:\r\n1. `python` doesn't exist on ubuntu. Setup a symlink for `python` -> `python3` ...\r\n\r\n2. `python3.9` running `make style` tries to change 30 files\r\n`30 files reformatted, 55 files left unchanged.`\r\n\r\n3. Tried to match the runner and did a `docker run ... 3.7.... 
-v .:/accelerate` ...\r\nThis still gave different results than the CI runner:\r\n`5 files reformatted, 80 files left unchanged.`\r\n\r\nI'm unable to actually get my black/isort to match what the test unhappy about.\r\n\r\nIt appears the test is failing for a line length style issue from a file that wasn't actually touched in this commit... \r\n\r\nIt feels like it should have tripped on #711 -- not here. \r\n\r\n```\r\npython utils/style_doc.py src/accelerate docs/source --max_len 119 --check_only\r\nTraceback (most recent call last):\r\n File \"utils/style_doc.py\", line 556, in <module>\r\n main(*args.files, max_len=args.max_len, check_only=args.check_only)\r\n File \"utils/style_doc.py\", line 544, in main\r\n raise ValueError(f\"{len(changed)} files should be restyled!\")\r\n```\r\n\r\nCan we just merge this in, and fix that style next time you lint?\r\n\r\nOr could you jump in on this one and help run the `make style` to get that file to format correctly based on your black/isort spec?\r\n\r\nIs there a docker image or something setup to run your `Makefile` in a reproducible environment? I'm probably missing something...", "from_author": true }, { "body": "Hey @sgugger --\r\n\r\nThis is jumping the shark, but I do have a (arguably) nice pre-commit template that handles autoflake, isort, black and works well cross-platform.\r\n\r\nHere's a screenshot. If you want to re-do the makefile, I can start a new ticket and switch it to pre-commit.\r\n\r\n![image](https://user-images.githubusercontent.com/523300/191733560-c07f973d-5c9c-4087-a8b0-6303ba028169.png)\r\n\r\nThis also checks for conventional commits format(optional).\r\n\r\nThis would be a bit bigger in scope and more opinionated than what I thought I was starting with some readability PR's :)\r\n\r\nLet me know if this is something that's of interest to swapping out for the huggingface team... if so, we can throw it in a new issue.\r\n", "from_author": true }, { "body": "Hi @ryanrussell, I think the issue here is the wrong black/flake8 may be installed? Can you try running `make style` again after doing `pip install -e .[quality]` (or `pip install accelerate[quality]` depending on how you have it installed)?", "from_author": false }, { "body": "Hi @ryanrussell. 90% on the team is on Ubuntu and have no problem with running `make style` without doing any aliases. Not sure why you don't have the `python` command available after installing Python.\r\n\r\nThen as @muellerzr said, you need to have the pinned versions we use for the libraries formatting the code, which is why we recommend using a virtual environment. I've pushed a style commit to move forward with this PR.\r\n\r\nI don't want to add pre-commit hooks as I prefer having the ability to do the styling in a separate commit when I'm afraid it might break something.", "from_author": false }, { "body": "Hey @sgugger -- thanks for helping fix this. Sounds like it's a user error on my side.\r\n\r\nI'll wait to send any more PR's until I setup a venv with correct version pins and can replicate the expected linting from the CI...\r\n\r\nJust found the discord, so joining there... 
if I run into any issues, much easier to iterate there than through comments :+1: ", "from_author": true }, { "body": "Failures are unrelated (something in the dev of evaluate messed up) so merging!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/713", "pr_id": 1063333029 }, { "diff": "diff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\nindex 5c04b8621..493444daa 100644\n--- a/src/accelerate/hooks.py\n+++ b/src/accelerate/hooks.py\n@@ -71,7 +71,7 @@ def post_forward(self, module, output):\n \n def detach_hook(self, module):\n \"\"\"\n- To be executed when the hook is deached from a module.\n+ To be executed when the hook is detached from a module.\n \n Args:\n module (`torch.nn.Module`): The module detached from this hook.\n@@ -182,7 +182,7 @@ class AlignDevicesHook(ModelHook):\n Args:\n execution_device (`torch.device`, *optional*):\n The device on which inputs and model weights should be placed before the forward pass.\n- offload (`bool`, *optional*, defauts to `False`):\n+ offload (`bool`, *optional*, defaults to `False`):\n Whether or not the weights should be offloaded after the forward pass.\n io_same_device (`bool`, *optional*, defaults to `False`):\n Whether or not the output should be placed on the same device as the input was.\n@@ -319,7 +319,7 @@ def attach_align_device_hook(\n The module where we want to attach the hooks.\n execution_device (`torch.device`, *optional*):\n The device on which inputs and model weights should be placed before the forward pass.\n- offload (`bool`, *optional*, defauts to `False`):\n+ offload (`bool`, *optional*, defaults to `False`):\n Whether or not the weights should be offloaded after the forward pass.\n weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\n@@ -402,7 +402,7 @@ def attach_align_device_hook_on_blocks(\n execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):\n The device on which inputs and model weights should be placed before the forward pass. It can be one device\n for the whole module, or a dictionary mapping module name to device.\n- offload (`bool`, *optional*, defauts to `False`):\n+ offload (`bool`, *optional*, defaults to `False`):\n Whether or not the weights should be offloaded after the forward pass. 
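Purely for orientation, a hedged sketch of calling the hook utility whose docstring is touched in this diff; only the parameters named there are used, and the tiny CPU model is a placeholder:

```python
import torch
from accelerate.hooks import attach_align_device_hook

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 2))

# Keep the module's weights and its inputs on `execution_device` for the
# forward pass; with the default offload=False nothing is moved back afterwards.
attach_align_device_hook(model, execution_device=torch.device("cpu"))

output = model(torch.randn(4, 8))
```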
It can be one boolean for the whole\n module, or a dictionary mapping module name to boolean.\n weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/712", "pr_id": 1063301116 }, { "diff": "diff --git a/utils/style_doc.py b/utils/style_doc.py\nindex 2a325805f..0422ebeb4 100644\n--- a/utils/style_doc.py\n+++ b/utils/style_doc.py\n@@ -28,7 +28,7 @@\n # Regexes\n # Re pattern that catches list introduction (with potential indent)\n _re_list = re.compile(r\"^(\\s*-\\s+|\\s*\\*\\s+|\\s*\\d+\\.\\s+)\")\n-# Re pattern that catches code block introduction (with potentinal indent)\n+# Re pattern that catches code block introduction (with potential indent)\n _re_code = re.compile(r\"^(\\s*)```(.*)$\")\n # Re pattern that catches rst args blocks of the form `Parameters:`.\n _re_args = re.compile(\"^\\s*(Args?|Arguments?|Params?|Parameters?):\\s*$\")\n@@ -62,7 +62,7 @@ def parse_code_example(code_lines):\n \n Args:\n code_lines (`List[str]`): The code lines to parse.\n- max_len (`int`): The maximum lengh per line.\n+ max_len (`int`): The maximum length per line.\n \n Returns:\n (List[`str`], List[`str`]): The list of code samples and the list of outputs.\n@@ -109,7 +109,7 @@ def format_code_example(code: str, max_len: int, in_docstring: bool = False):\n \n Args:\n code (`str`): The code example to format.\n- max_len (`int`): The maximum lengh per line.\n+ max_len (`int`): The maximum length per line.\n in_docstring (`bool`, *optional*, defaults to `False`): Whether or not the code example is inside a docstring.\n \n Returns:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/711", "pr_id": 1063297381 }, { "diff": "diff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex 917ee329d..5e26eb9b5 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -85,12 +85,12 @@ def setUp(self):\n self.add_mocks(mock.patch.dict(os.environ, {\"WANDB_DIR\": self.tmpdir}))\n \n @staticmethod\n- def get_value_from_log(key: str, log: str, key_occurance: int = 0):\n+ def get_value_from_log(key: str, log: str, key_occurrence: int = 0):\n \"\"\"\n Parses wandb log for `key` and returns the value.\n- If parsing through multiple calls to .log, pass in a `key_occurance`\n+ If parsing through multiple calls to .log, pass in a `key_occurrence`\n \"\"\"\n- res = re.findall(rf\"(?<={key} )[^\\s]+\", log)[key_occurance]\n+ res = re.findall(rf\"(?<={key} )[^\\s]+\", log)[key_occurrence]\n if '\"' in res:\n return re.findall(r'\"([^\"]*)\"', res)[0]\n else:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/710", "pr_id": 1063295373 }, { "diff": "diff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex 707c93c4c..87f804ce6 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -58,7 +58,7 @@\n EVAL_BATCH_SIZE = 32\n \n # New Code #\n-# We need a different `get_dataloaders` function that will build dataloaders by indexs\n+# We need a different `get_dataloaders` 
function that will build dataloaders by index\n \n \n def get_fold_dataloaders(\n@@ -71,9 +71,9 @@ def get_fold_dataloaders(\n accelerator (`Accelerator`):\n The main `Accelerator` object\n train_idxs (list of `int`):\n- The split indicies for the training dataset\n+ The split indices for the training dataset\n valid_idxs (list of `int`):\n- The split indicies for the validation dataset\n+ The split indices for the validation dataset\n batch_size (`int`):\n The size of the minibatch. Default is 16\n \"\"\"\ndiff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\nindex 89234a911..36ace84b9 100755\n--- a/examples/by_feature/deepspeed_with_config_support.py\n+++ b/examples/by_feature/deepspeed_with_config_support.py\n@@ -525,7 +525,7 @@ def group_texts(examples):\n },\n ]\n # New Code #\n- # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer\n+ # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer\n optimizer_cls = (\n torch.optim.AdamW\n if accelerator.state.deepspeed_plugin is None\n@@ -554,7 +554,7 @@ def group_texts(examples):\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n \n # New Code #\n- # Creates Dummy Scheduler if `scheduler` was spcified in the config file else creates `args.lr_scheduler_type` Scheduler\n+ # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler\n if (\n accelerator.state.deepspeed_plugin is None\n or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 9f0d55c69..82dc34b8a 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -29,7 +29,7 @@\n ########################################################################\n # This is a fully working simple example to use Accelerate,\n # specifically showcasing how to ensure out-of-memory errors never\n-# iterrupt training, and builds off the `nlp_example.py` script.\n+# interrupt training, and builds off the `nlp_example.py` script.\n #\n # This example trains a Bert base model on GLUE MRPC\n # in any of the following settings (with the same script):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/709", "pr_id": 1063269081 }, { "diff": "diff --git a/docs/source/usage_guides/sagemaker.mdx b/docs/source/usage_guides/sagemaker.mdx\nindex 6195783e9..0afe52e29 100644\n--- a/docs/source/usage_guides/sagemaker.mdx\n+++ b/docs/source/usage_guides/sagemaker.mdx\n@@ -129,7 +129,26 @@ You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04\n \n ### Distributed Training: Data Parallelism\n \n-*currently in development, will be supported soon.*\n+Set up the accelerate config by running `accelerate config` and answer the SageMaker questions and set it up.\n+To use SageMaker DDP, select it when asked \n+`What is the distributed mode? 
([0] No distributed training, [1] data parallelism):`.\n+Example config below:\n+```yaml\n+base_job_name: accelerate-sagemaker-1\n+compute_environment: AMAZON_SAGEMAKER\n+distributed_type: DATA_PARALLEL\n+ec2_instance_type: ml.p3.16xlarge\n+iam_role_name: xxxxx\n+image_uri: null\n+mixed_precision: fp16\n+num_machines: 1\n+profile: xxxxx\n+py_version: py38\n+pytorch_version: 1.10.2\n+region: us-east-1\n+transformers_version: 4.17.0\n+use_cpu: false\n+```\n \n ### Distributed Training: Model Parallelism\n \ndiff --git a/examples/README.md b/examples/README.md\nindex c4f050b3f..6a3c0a11a 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -187,6 +187,9 @@ To run it in each of these various modes, use the following commands:\n ### Simple vision example (GANs)\n \n - [huggan project](https://github.com/huggingface/community-events/tree/main/huggan)\n+\n+### Using AWS SageMaker integration\n+- [Examples showcasing AWS SageMaker integration of πŸ€— Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker)\n \n ## Finer Examples\n \ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex 786881edf..b3a45c9e4 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -157,7 +157,7 @@ def get_sagemaker_input():\n )\n \n distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] data parallelism): \",\n+ \"What is the distributed mode? ([0] No distributed training, [1] data parallelism): \",\n _convert_sagemaker_distributed_mode,\n error_message=\"Please enter 0 or 1\",\n )\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/708", "pr_id": 1062918270 }, { "diff": "diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nindex cb0a7ee42..277a81df9 100644\n--- a/.github/workflows/nightly.yml\n+++ b/.github/workflows/nightly.yml\n@@ -7,6 +7,7 @@ on:\n \n env:\n RUN_SLOW: \"yes\"\n+ IS_GITHUB_CI: \"1\"\n \n jobs:\n run_all_tests_single_gpu:\n@@ -27,6 +28,7 @@ jobs:\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }} \n pip install -e . --no-deps\n+ pip install pytest-reportlog\n \n - name: Run test on GPUs\n run: |\n@@ -37,6 +39,11 @@ jobs:\n source activate accelerate\n pip uninstall comet_ml -y\n make test_examples\n+ \n+ - name: Generate Report\n+ if: always()\n+ run: |\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\n \n run_all_tests_multi_gpu:\n runs-on: [self-hosted, docker-gpu, multi-gpu]\n@@ -56,6 +63,7 @@ jobs:\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }}\n pip install -e . 
--no-deps\n+ pip install pytest-reportlog\n \n - name: Run core and big modeling tests on GPUs\n run: |\n@@ -72,4 +80,9 @@ jobs:\n run: |\n source activate accelerate\n pip uninstall comet_ml -y\n- make test_examples\n\\ No newline at end of file\n+ make test_examples\n+\n+ - name: Generate Report\n+ if: always()\n+ run: |\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\n\\ No newline at end of file\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/on-merge.yml\nindex 028b99d9a..a794cd72b 100644\n--- a/.github/workflows/on-merge.yml\n+++ b/.github/workflows/on-merge.yml\n@@ -6,6 +6,7 @@ on:\n \n env:\n TESTING_MOCKED_DATALOADERS: \"1\"\n+ IS_GITHUB_CI: \"1\"\n \n jobs:\n run_all_tests_single_gpu:\n@@ -26,6 +27,7 @@ jobs:\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }}\n pip install -e .[testing,test_trackers]\n+ pip install pytest-reportlog\n \n - name: Run test on GPUs\n run: |\n@@ -37,6 +39,11 @@ jobs:\n pip uninstall comet_ml -y\n make test_examples\n \n+ - name: Generate Report\n+ if: always()\n+ run: |\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\n+\n run_all_tests_multi_gpu:\n runs-on: [self-hosted, docker-gpu, multi-gpu]\n container:\n@@ -53,6 +60,7 @@ jobs:\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }}\n pip install -e .[testing,test_trackers]\n+ pip install pytest-reportlog\n \n - name: Run test on GPUs\n run: |\n@@ -63,4 +71,9 @@ jobs:\n run: |\n source activate accelerate\n pip uninstall comet_ml -y\n- make test_examples\n\\ No newline at end of file\n+ make test_examples\n+\n+ - name: Generate Report\n+ if: always()\n+ run: |\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\n\\ No newline at end of file\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 2cf8e34a7..d39d36951 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -5,7 +5,7 @@ on: [pull_request]\n env:\n HF_HOME: ~/hf_cache\n TESTING_MOCKED_DATALOADERS: \"1\"\n- CI: 1\n+ IS_GITHUB_CI: \"1\"\n \n jobs:\n run-tests:\ndiff --git a/Makefile b/Makefile\nindex 0c9ad0ca9..52c594ae7 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -25,40 +25,40 @@ style:\n \t\n # Run tests for the library\n test:\n-\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(CI),--report-log 'all.log',)\n+\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log 'all.log',)\n \n test_big_modeling:\n-\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(CI),--report-log 'big_modeling.log',)\n+\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log 'big_modeling.log',)\n \n test_core:\n \tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\n-\t--ignore=./tests/fsdp $(if $(CI),--report-log 'core.log',)\n+\t--ignore=./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'core.log',)\n \n test_deepspeed:\n-\tpython -m pytest -s -v ./tests/deepspeed $(if $(CI),--report-log 'deepspeed.log',)\n+\tpython -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log 'deepspeed.log',)\n \n test_fsdp:\n-\tpython -m pytest -s -v ./tests/fsdp $(if $(CI),--report-log 'fsdp.log',)\n+\tpython -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'fsdp.log',)\n \n test_examples:\n-\tpython -m pytest -s -v ./tests/test_examples.py $(if $(CI),--report-log 'examples.log',)\n+\tpython -m pytest -s -v 
./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log 'examples.log',)\n \n # Broken down example tests for the CI runners\n test_integrations:\n-\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(CI),--report-log 'integrations.log',)\n+\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'integrations.log',)\n \n test_example_differences:\n-\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(CI),--report-log 'example_diff.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log 'example_diff.log',)\n \n test_checkpoint_epoch:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(CI),--report-log 'checkpoint_epoch.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(IS_GITHUB_CI),--report-log 'checkpoint_epoch.log',)\n \n test_checkpoint_step:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(CI),--report-log 'checkpoint_step.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(IS_GITHUB_CI),--report-log 'checkpoint_step.log',)\n \n # Same as test but used to install only the base dependencies\n test_prod:\n \t$(MAKE) test_core\n \n test_rest:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(CI),--report-log 'rest.log',)\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(IS_GITHUB_CI),--report-log 'rest.log',)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/704", "pr_id": 1061646486 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 3fec513d2..2cf8e34a7 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -5,11 +5,13 @@ on: [pull_request]\n env:\n HF_HOME: ~/hf_cache\n TESTING_MOCKED_DATALOADERS: \"1\"\n+ CI: 1\n \n jobs:\n run-tests:\n runs-on: ubuntu-latest\n strategy:\n+ fail-fast: false\n matrix:\n pytorch-version: [\n latest,\n@@ -48,7 +50,13 @@ jobs:\n if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi\n if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi\n if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torch==1.6.0; fi\n+ pip install pytest-reportlog\n \n - name: Run Tests\n run: |\n- make ${{ matrix.test-kind }}\n\\ No newline at end of file\n+ make ${{ matrix.test-kind }}\n+\n+ - name: Generate Report\n+ if: always()\n+ run: |\n+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY\n\\ No newline at end of file\ndiff --git a/Makefile b/Makefile\nindex c1c79d6e8..0c9ad0ca9 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -25,39 +25,40 @@ style:\n \t\n # Run tests for the library\n test:\n-\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py\n+\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(CI),--report-log 'all.log',)\n \n test_big_modeling:\n-\tpython -m pytest -s -v ./tests/test_big_modeling.py\n+\tpython -m pytest -s -v ./tests/test_big_modeling.py $(if $(CI),--report-log 'big_modeling.log',)\n \n test_core:\n \tpython -m pytest -s -v ./tests/ 
--ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\n-\t--ignore=./tests/fsdp\n+\t--ignore=./tests/fsdp $(if $(CI),--report-log 'core.log',)\n \n test_deepspeed:\n-\tpython -m pytest -s -v ./tests/deepspeed\n+\tpython -m pytest -s -v ./tests/deepspeed $(if $(CI),--report-log 'deepspeed.log',)\n \n test_fsdp:\n-\tpython -m pytest -s -v ./tests/fsdp\n+\tpython -m pytest -s -v ./tests/fsdp $(if $(CI),--report-log 'fsdp.log',)\n \n test_examples:\n-\tpython -m pytest -s -v ./tests/test_examples.py\n+\tpython -m pytest -s -v ./tests/test_examples.py $(if $(CI),--report-log 'examples.log',)\n \n # Broken down example tests for the CI runners\n test_integrations:\n-\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp\n+\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(CI),--report-log 'integrations.log',)\n+\n test_example_differences:\n-\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests\n+\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(CI),--report-log 'example_diff.log',)\n \n test_checkpoint_epoch:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\"\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\" $(if $(CI),--report-log 'checkpoint_epoch.log',)\n \n test_checkpoint_step:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\"\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\" $(if $(CI),--report-log 'checkpoint_step.log',)\n \n # Same as test but used to install only the base dependencies\n test_prod:\n \t$(MAKE) test_core\n \n test_rest:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\"\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\" $(if $(CI),--report-log 'rest.log',)\ndiff --git a/utils/log_reports.py b/utils/log_reports.py\nnew file mode 100644\nindex 000000000..f701f08c2\n--- /dev/null\n+++ b/utils/log_reports.py\n@@ -0,0 +1,34 @@\n+import json\n+from pathlib import Path \n+import subprocess\n+\n+failed = []\n+passed = []\n+\n+group_info = []\n+\n+total_num_failed = 0\n+for log in Path().glob(\"*.log\"):\n+ section_num_failed = 0\n+ with open(log, \"r\") as f:\n+ for line in f:\n+ line = json.loads(line)\n+ if line.get(\"nodeid\", \"\") != \"\":\n+ test = line[\"nodeid\"]\n+ if line.get(\"duration\", None) is not None:\n+ duration = f'{line[\"duration\"]:.4f}'\n+ if line.get(\"outcome\", \"\") == \"failed\":\n+ section_num_failed += 1\n+ failed.append([test, duration])\n+ else:\n+ passed.append([test, duration])\n+ group_info.append([str(log), section_num_failed])\n+\n+if len(failed) > 0:\n+ result = \"## Failed Tests:\\n\"\n+ failed_table = '| Test Location | Test Class | Test Name |\\n|---|---|---|\\n| '\n+ for test in failed:\n+ failed_table += ' | '.join(test[0].split(\"::\"))\n+ failed_table += \" |\"\n+ result += failed_table\n+ print(result)\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/703", "pr_id": 1060736877 }, { "diff": "diff --git a/docs/source/usage_guides/memory.mdx b/docs/source/usage_guides/memory.mdx\nindex 9c5674977..8841cd584 100644\n--- a/docs/source/usage_guides/memory.mdx\n+++ 
b/docs/source/usage_guides/memory.mdx\n@@ -48,4 +48,4 @@ def training_function(args):\n + inner_training_loop()\n ```\n \n-To find out more, check the documentation [here](package_reference/utilities#accelerate.find_executable_batch_size)\n\\ No newline at end of file\n+To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).\n", "code_comments": [], "context": [ { "body": "Hello @tomaarsen, looks good, thanks for your contribution πŸ˜„", "from_author": false }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks a lot!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/702", "pr_id": 1058450492 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 311111ad4..0750248de 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -75,15 +75,35 @@ def register_empty_buffer(module, name, buffer):\n if buffer is not None:\n module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n \n+ # Patch tensor creation\n+ if include_buffers:\n+ tensor_constructors_to_patch = {\n+ torch_function_name: getattr(torch, torch_function_name)\n+ for torch_function_name in [\"empty\", \"zeros\", \"ones\", \"full\"]\n+ }\n+ else:\n+ tensor_constructors_to_patch = {}\n+\n+ def patch_tensor_constructor(fn):\n+ def wrapper(*args, **kwargs):\n+ kwargs[\"device\"] = torch.device(\"meta\")\n+ return fn(*args, **kwargs)\n+\n+ return wrapper\n+\n try:\n nn.Module.register_parameter = register_empty_parameter\n if include_buffers:\n nn.Module.register_buffer = register_empty_buffer\n+ for torch_function_name in tensor_constructors_to_patch.keys():\n+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))\n yield\n finally:\n nn.Module.register_parameter = old_register_parameter\n if include_buffers:\n nn.Module.register_buffer = old_register_buffer\n+ for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():\n+ setattr(torch, torch_function_name, old_torch_function)\n \n \n def cpu_offload(\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "I don't really understand why this is needed: while load a pretrained model inside the context manager and complain it takes time?\r\n```py\r\nfrom accelerate import init_empty_weights\r\nfrom transformers import AutoConfig, AutoModel\r\n\r\nconfig = AutoConfig.from_pretrained(\"gpt2\")\r\n\r\nwith init_empty_weights():\r\n model = AutoModel.from_config(config)\r\n```\r\nis way faster than 6s", "from_author": false }, { "body": "Hum it's doing it 10 times, so 0.6 sec per load. Benchmarking your solution displays the same order of magnitude: `14.567796499999996` (I think the difference is that the use of config doesn't require to read from disk the checkpoint anymore)\r\n\r\nThough your workaround removed the need to override `torch.load` and especially the hack that I introduced.\r\n```\r\nUsing config + this PR: 4.1660165419999995\r\n```", "from_author": true }, { "body": "Also I'm not sure why this wasn't detected, but the test `tests/test_big_modeling.py::BigModelingTester::test_init_empty_weights` pass on my MAC contrary to CI ... 
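A hedged sketch of the intended effect of this patch once applied: inside `init_empty_weights(include_buffers=True)`, plain tensor constructors such as `torch.zeros` also produce meta tensors, not just registered parameters (sizes are arbitrary):

```python
import torch
from accelerate import init_empty_weights

with init_empty_weights(include_buffers=True):
    layer = torch.nn.Linear(1000, 1000)  # parameters land on the "meta" device
    scratch = torch.zeros(1000, 1000)    # with this patch, also created on "meta"

print(layer.weight.device)  # meta
print(scratch.device)       # meta once the constructor patching is active
```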
Moving back to draft as I need to figure this one out.", "from_author": true }, { "body": "Actually if we activate this feature only when `include_buffers=True` (I'm guessing the assumption is that all pytorch tensors are expected to be `meta`) then that should be fine.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/699", "pr_id": 1056024034 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex b17ea387d..552c8392c 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -747,6 +747,12 @@ def _prepare_deepspeed(self, *args):\n batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\n result = [obj for obj in args]\n \n+ if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]:\n+ logger.info(\n+ f\"Updating DeepSpeed's gradient accumulation steps to {self.gradient_accumulation_steps} from \"\n+ f\"{deepspeed_plugin.deepspeed_config['gradient_accumulation_steps']}.\"\n+ )\n+ deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"] = self.gradient_accumulation_steps\n config_kwargs = {\n \"train_micro_batch_size_per_gpu\": batch_size_per_device,\n \"train_batch_size\": batch_size_per_device\n", "code_comments": [ { "body": "No need for a warning here, I think a logging info is sufficient (the fact that args to the `Accelerator` supercede the config is a common behavior, you can do it to force CPU or a given mixed precision already).", "diff_hunk": "@@ -747,6 +747,12 @@ def _prepare_deepspeed(self, *args):\n batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\n result = [obj for obj in args]\n \n+ if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]:\n+ warnings.warn(", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -747,6 +747,12 @@ def _prepare_deepspeed(self, *args):\n batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\n result = [obj for obj in args]\n \n+ if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]:\n+ warnings.warn(", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/698", "pr_id": 1055539565 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 84c2ebd67..1eb8ff79d 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -364,10 +364,11 @@ def __iter__(self):\n \n @property\n def total_batch_size(self):\n+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\n return (\n- self.batch_sampler.batch_size\n- if self.batch_sampler.split_batches\n- else (self.batch_sampler.batch_size * self.batch_sampler.num_processes)\n+ batch_sampler.batch_size\n+ if batch_sampler.split_batches\n+ else (batch_sampler.batch_size * batch_sampler.num_processes)\n )\n \n @property\n@@ -639,14 +640,17 @@ def prepare_data_loader(\n )\n else:\n # New batch sampler for the current process.\n- if hasattr(dataloader.sampler, \"generator\"):\n- if dataloader.sampler.generator is None:\n- dataloader.sampler.generator = torch.Generator()\n- generator = dataloader.sampler.generator\n- generator.manual_seed(int(torch.empty((), 
dtype=torch.int64).random_().item()))\n- elif getattr(dataloader.batch_sampler, \"generator\", None) is not None:\n- generator = dataloader.batch_sampler.generator\n sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)\n+ if sampler_is_batch_sampler:\n+ sampler = dataloader.sampler.sampler\n+ else:\n+ sampler = dataloader.batch_sampler.sampler\n+ if hasattr(sampler, \"generator\"):\n+ if sampler.generator is None:\n+ sampler.generator = torch.Generator()\n+ generator = sampler.generator\n+ generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\n+\n batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler\n new_batch_sampler = BatchSamplerShard(\n batch_sampler,\n@@ -692,6 +696,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),\n rng_types=rng_types,\n generator=generator,\n **kwargs,\n", "code_comments": [ { "body": "I think we should use a instance check on the sampler here, as in my tests, there was a `batch_sampler` when using a `BatchSampler` for `sampler`:\r\n```py\r\nbatch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\r\n```", "diff_hunk": "@@ -364,10 +364,14 @@ def __iter__(self):\n \n @property\n def total_batch_size(self):\n+ if self.batch_sampler is None:\n+ batch_sampler = self.sampler\n+ else:\n+ batch_sampler = self.batch_sampler", "from_author": false }, { "body": "Would reuse the same `batch_size` as for the original `datalaoder` here.", "diff_hunk": "@@ -692,6 +699,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=None,", "from_author": false }, { "body": "This is necessary else `batch_sampler` would get created with `batch_size=1` when it shouldn't do that as the `sampler` is already an instance of `batch_sampler`. This line here: https://github.com/pytorch/pytorch/blob/master/torch/utils/data/dataloader.py#L357\r\n\r\nThis leads to the issue `1.a` specified in the PR description\r\n", "diff_hunk": "@@ -692,6 +699,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=None,", "from_author": true }, { "body": "But it's not our place to fix users mistakes and they might have wanted to use `batch_size=1` with this for some reason instead of `batch_size=None`.", "diff_hunk": "@@ -692,6 +699,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=None,", "from_author": false }, { "body": "Here, the Dataloader default is batch_size=1 and not the one fixed by user. The one fixed by user is used which is part of sampler which is an instance of BatchSampler. When sampler is an object of BatchSampler, batch_sampler shouldn't be created and for that batch_size to dataloader has to be None so that dataloader can use sampler directly to create batches. 
", "diff_hunk": "@@ -692,6 +699,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=None,", "from_author": true }, { "body": "Yes, `batch_size` should be None, but in the examples of the issue linked above, the user did not do that. We should honor the same argument as was passed to the original dataloader which is saved [here](https://github.com/pytorch/pytorch/blob/d2d145a40001d1e1f815a144160bd0b8d0f60ea0/torch/utils/data/dataloader.py#L361).", "diff_hunk": "@@ -692,6 +699,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=None,", "from_author": false }, { "body": "Got it. However, even if the user passes `None` to dataloader, it isn't being passed to the final `DataLoaderShard` as batch_size is part of `ignore_kwargs` and that leads to default value of 1 being used. https://github.com/huggingface/accelerate/blob/main/src/accelerate/data_loader.py#L659", "diff_hunk": "@@ -692,6 +699,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=None,", "from_author": true }, { "body": "Yes when there is a `batch_sampler`, because PyTorch raises an error and does not let us do that. But here we only pass a sampler ;-)", "diff_hunk": "@@ -692,6 +699,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=None,", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -364,10 +364,14 @@ def __iter__(self):\n \n @property\n def total_batch_size(self):\n+ if self.batch_sampler is None:\n+ batch_sampler = self.sampler\n+ else:\n+ batch_sampler = self.batch_sampler", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -692,6 +699,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=None,", "from_author": true }, { "body": "If `(batch_size != 1 and batch_size is not None) and sampler_is_batch_sampler`, is there any \"default\" way to shard batches without explicit instruction from user? I think at least a warning should be raised.", "diff_hunk": "@@ -692,6 +696,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),", "from_author": false }, { "body": "BTW, `hasattr(dataloader, \"batch_size\")` is always `True` for `DataLoader`.", "diff_hunk": "@@ -692,6 +696,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),", "from_author": false }, { "body": "> BTW, `hasattr(dataloader, \"batch_size\")` is always `True` for `DataLoader`.\r\n\r\nresolving this in the other PR. 
wrt warning, current logic follows PyTorch API and as such warning isn't warranted.", "diff_hunk": "@@ -692,6 +696,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n+ batch_size=getattr(dataloader, \"batch_size\", _PYTORCH_DATALOADER_KWARGS[\"batch_size\"]),", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/696", "pr_id": 1053039468 }, { "diff": "diff --git a/docs/source/usage_guides/mps.mdx b/docs/source/usage_guides/mps.mdx\nindex 3648bdce8..7a7f8a628 100644\n--- a/docs/source/usage_guides/mps.mdx\n+++ b/docs/source/usage_guides/mps.mdx\n@@ -19,7 +19,7 @@ This will map computational graphs and primitives on the MPS Graph framework and\n For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n \n-### Benefits of Training and Inference using Apple M1 Chips\n+### Benefits of Training and Inference using Apple Silicon Chips\n \n 1. Enables users to train larger networks or batch sizes locally\n 2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex dcdead956..0387109b6 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -121,12 +121,17 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n \n else:\n- # No need for a distributed launch otherwise as it's either CPU or one GPU.\n- if torch.cuda.is_available():\n+ # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.\n+ use_mps_device = \"false\"\n+ if torch.backends.mps.is_available():\n+ print(\"Launching training on MPS.\")\n+ use_mps_device = \"true\"\n+ elif torch.cuda.is_available():\n print(\"Launching training on one GPU.\")\n else:\n print(\"Launching training on CPU.\")\n- function(*args)\n+ with patch_environment(use_mps_device=use_mps_device):\n+ function(*args)\n \n \n def debug_launcher(function, args=(), num_processes=2):\n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_695). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/695", "pr_id": 1052863881 }, { "diff": "diff --git a/src/accelerate/utils/random.py b/src/accelerate/utils/random.py\nindex 51b7c3719..e95ed03bf 100644\n--- a/src/accelerate/utils/random.py\n+++ b/src/accelerate/utils/random.py\n@@ -64,7 +64,7 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n- elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n+ elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU, DistributedType.FSDP]:\n rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n rng_state = rng_state.cpu()\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/688", "pr_id": 1051471370 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex fb5393d34..84c2ebd67 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -622,6 +622,7 @@ def prepare_data_loader(\n new_dataset = dataloader.dataset\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n+ sampler_is_batch_sampler = False\n generator = getattr(dataloader, \"generator\", None)\n # No change if no multiprocess\n if num_processes != 1 and not dispatch_batches:\n@@ -645,8 +646,10 @@ def prepare_data_loader(\n generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\n elif getattr(dataloader.batch_sampler, \"generator\", None) is not None:\n generator = dataloader.batch_sampler.generator\n+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)\n+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler\n new_batch_sampler = BatchSamplerShard(\n- dataloader.batch_sampler,\n+ batch_sampler,\n num_processes=num_processes,\n process_index=process_index,\n split_batches=split_batches,\n@@ -684,6 +687,15 @@ def prepare_data_loader(\n _drop_last=dataloader.drop_last,\n **kwargs,\n )\n+ elif sampler_is_batch_sampler:\n+ dataloader = DataLoaderShard(\n+ new_dataset,\n+ device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n+ sampler=new_batch_sampler,\n+ rng_types=rng_types,\n+ generator=generator,\n+ **kwargs,\n+ )\n else:\n dataloader = DataLoaderShard(\n new_dataset,\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/687", "pr_id": 1050421126 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex cbe39efa3..fb5393d34 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -462,10 +462,6 @@ def _fetch_batches(self, iterator):\n else:\n batch_info = [None, True]\n broadcast_object_list(batch_info)\n- if batch_info[1]:\n- return batch, batch_info\n- else:\n- return batch, batch_info\n 
return batch, batch_info\n \n def __iter__(self):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/686", "pr_id": 1048925301 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 989774148..b17ea387d 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -745,6 +745,7 @@ def _prepare_deepspeed(self, *args):\n )\n else:\n batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\n+ result = [obj for obj in args]\n \n config_kwargs = {\n \"train_micro_batch_size_per_gpu\": batch_size_per_device,\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/683", "pr_id": 1047756035 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 9236ee310..cbe39efa3 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -463,10 +463,10 @@ def _fetch_batches(self, iterator):\n batch_info = [None, True]\n broadcast_object_list(batch_info)\n if batch_info[1]:\n- return batch, batch_info, True\n+ return batch, batch_info\n else:\n- return batch, batch_info, True\n- return batch, batch_info, False\n+ return batch, batch_info\n+ return batch, batch_info\n \n def __iter__(self):\n self.gradient_state._set_end_of_dataloader(False)\n@@ -477,11 +477,10 @@ def __iter__(self):\n stop_iteration = False\n self._stop_iteration = False\n first_batch = None\n- next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n+ next_batch, next_batch_info = self._fetch_batches(main_iterator)\n while not stop_iteration:\n- batch, batch_info, skip = next_batch, next_batch_info, next_skip\n- if skip:\n- continue\n+ batch, batch_info = next_batch, next_batch_info\n+\n if self.state.process_index != 0:\n # Initialize tensors on other processes than process 0.\n batch = initialize_tensors(batch_info[0])\n@@ -500,7 +499,7 @@ def __iter__(self):\n if not stop_iteration:\n # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in\n # the dataloader since the number of batches is a round multiple of the number of processes.\n- next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n+ next_batch, next_batch_info = self._fetch_batches(main_iterator)\n # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.\n if self._stop_iteration and next_batch_info[0] is None:\n stop_iteration = True\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Hello, for some reason I wasn't notified by github about review request, looks good. 
I was thinking if the below part of `_fetch_batches` could be simplified \r\n\r\n```diff\r\ndef _fetch_batches(self, iterator):\r\n batches, batch = None, None\r\n # On process 0, we gather the batch to dispatch.\r\n if self.state.process_index == 0:\r\n try:\r\n if self.split_batches:\r\n # One batch of the main iterator is dispatched and split.\r\n batch = next(iterator)\r\n else:\r\n # num_processes batches of the main iterator are concatenated then dispatched and split.\r\n # We add the batches one by one so we have the remainder available when drop_last=False.\r\n batches = []\r\n for _ in range(self.state.num_processes):\r\n batches.append(next(iterator))\r\n batch = concatenate(batches, dim=0)\r\n # In both cases, we need to get the structure of the batch that we will broadcast on other\r\n # processes to initialize the tensors with the right shape.\r\n # data_structure, stop_iteration\r\n batch_info = [get_data_structure(batch), False]\r\n except StopIteration:\r\n batch_info = [None, True]\r\n else:\r\n batch_info = [None, self._stop_iteration]\r\n # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.\r\n broadcast_object_list(batch_info)\r\n self._stop_iteration = batch_info[1]\r\n if self._stop_iteration:\r\n # If drop_last is False and split_batches is False, we may have a remainder to take care of.\r\n if not self.split_batches and not self._drop_last:\r\n if self.state.process_index == 0 and len(batches) > 0:\r\n batch = concatenate(batches, dim=0)\r\n batch_info = [get_data_structure(batch), False]\r\n else:\r\n batch_info = [None, True]\r\n broadcast_object_list(batch_info)\r\n- if batch_info[1]:\r\n- return batch, batch_info\r\n- else:\r\n- return batch, batch_info\r\n return batch, batch_info\r\n```", "from_author": false }, { "body": "Yes, you're right thanks! 
Will make a follow-up PR.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/682", "pr_id": 1047708869 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex c7f235843..fad42792d 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -270,6 +270,25 @@ def launch_command_parser(subparsers=None):\n default=None,\n help=\"The port to use to communicate with the machine of rank 0.\",\n )\n+ # Rendezvous related arguments\n+ parser.add_argument(\n+ \"--rdzv_conf\",\n+ type=str,\n+ default=\"\",\n+ help=\"Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).\",\n+ )\n+ parser.add_argument(\n+ \"--max_restarts\",\n+ type=int,\n+ default=0,\n+ help=\"Maximum number of worker group restarts before failing.\",\n+ )\n+ parser.add_argument(\n+ \"--monitor_interval\",\n+ type=float,\n+ default=5,\n+ help=\"Interval, in seconds, to monitor the state of workers.\",\n+ )\n parser.add_argument(\n \"--main_training_function\",\n type=str,\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "> It looks like those new fields are not used anywhere afterwards?\r\n\r\nHello, it is being used via below lines:\r\n\r\nhttps://github.com/huggingface/accelerate/blob/4400eb90b29638deec117f95fecdcb79bd7684cb/src/accelerate/commands/launch.py#L465\r\n\r\nhttps://github.com/huggingface/accelerate/blob/4400eb90b29638deec117f95fecdcb79bd7684cb/src/accelerate/utils/launch.py#L51", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/680", "pr_id": 1046590816 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 27db19b9b..989774148 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -723,25 +723,28 @@ def _prepare_deepspeed(self, *args):\n \n deepspeed_plugin = self.state.deepspeed_plugin\n \n- result = [\n- self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\n- for obj in args\n- ]\n-\n- batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n- if self.split_batches:\n- batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\n- if len(batch_sizes) == 0:\n- raise ValueError(\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n- )\n+ if deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"] == \"auto\":\n+ result = [\n+ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\n+ for obj in args\n+ ]\n+\n+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ if self.split_batches:\n+ batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\n+ if len(batch_sizes) == 0:\n+ raise ValueError(\n+ \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ )\n \n- batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n- if len(batch_sizes) > 1:\n- logger.info(\n- \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\n- f\"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n- )\n+ batch_size_per_device = min(batch_sizes) if 
deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n+ if len(batch_sizes) > 1:\n+ logger.info(\n+ \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\n+ f\"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n+ )\n+ else:\n+ batch_size_per_device = deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]\n \n config_kwargs = {\n \"train_micro_batch_size_per_gpu\": batch_size_per_device,\n@@ -916,7 +919,9 @@ def backward(self, loss, **kwargs):\n \n Should be used in lieu of `loss.backward()`.\n \"\"\"\n- loss /= self.gradient_accumulation_steps\n+ if self.distributed_type != DistributedType.DEEPSPEED:\n+ # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`\n+ loss = loss / self.gradient_accumulation_steps\n if self.distributed_type == DistributedType.DEEPSPEED:\n self.deepspeed_engine_wrapped.backward(loss, **kwargs)\n elif self.scaler is not None:\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex c7f235843..30e497e84 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -549,7 +549,8 @@ def deepspeed_launcher(args):\n current_env[\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n current_env[\"DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\n current_env[\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n- current_env[\"DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file).lower()\n+ if args.deepspeed_config_file is not None:\n+ current_env[\"DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file)\n \n if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n with open(\".deepspeed_env\", \"a\") as f:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Hmm, I tried to remove the fake dataloader workaround that was discussed at https://discuss.huggingface.co/t/when-using-deepspeed-why-do-i-need-to-pass-dataloaders-to-the-accelerator-prepare/22432\r\n\r\nand it's super cumbersome. It appears that the only way to get to the batch size is from dataloader? why can't it be derived from a `batch_size` argument? or is it by design that `batch_size` is derived from dataloader?\r\n\r\nSpecifically to this PR:\r\n\r\n1. The original idea behind auto was to try to make the ds config file as simple as possible wrt to hardcoding values and have command line args set these things once. So this doesn't work:\r\n\r\n```\r\n \"train_micro_batch_size_per_gpu\": \"auto\",\r\n```\r\n\r\nTo remind, the intention of creating `auto`-values in DS config was to avoid mismatch of the same configuration value used by different sub-systems. So the way I was thinking when integrating DS in HF Trainer, is let's have one place where `batch_size` is set and then all other sub-systems would inherit that value, rather than relying on the user to remember to change the same value in many places. I hope this makes sense. I don't understand the design here so I'm surely missing something important.\r\n\r\nI was trying to remove the originally used workaround \r\nhttps://github.com/huggingface/m4/pull/610\r\nbut I think the workaround with the dummy dataloader is a way user-friendlier than the hardcoded `train_micro_batch_size_per_gpu` config value.\r\n\r\n2. 
Also while this feature that supports ` train_micro_batch_size_per_gpu` config was added, the end user has no idea it's supported w/o reading the source code - the error message is still:\r\n\r\n> You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\r\n\r\nShould it perhaps say:\r\n\r\n> when using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file.\r\n\r\nor something of a kind? otherwise the feature is there but nobody knows about it.\r\n\r\nThe API doc could also say that, except it's private so there is no documentation.\r\n\r\nThank you for reading.", "from_author": false }, { "body": "Hello Stas,\r\n\r\n1. Inline with the intention of using `auto` values, we use the dataloader to infer the `batch_size`, one reason reason being that `accelerator.prepare` function or `accelerator` object doesn't have access to command line arguments, i.e., `batch_size` argument from the main code. Another reason being that the single place where `batch_size` gets set is while the user creates dataloaders as part of conventional training, e.g., the user can have `batch_size` argument which they may modify because of certain custom logic and the modified batch_size now gets used to create dataloaders. As we have no control over training loop/code unlike Trainer, it makes sense to infer `batch_size` directly from dataloaders. \r\n\r\nWould the alternative suggestion work?\r\n`AcceleratorState().deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]=my_batch_size` before calling `accelerator.prepare()`?\r\n\r\n2. Thank you for the pointer on the error message, I will update it.\r\n", "from_author": true }, { "body": "Hi Sourab!\r\n\r\nIt appears that `accelerator.prepare` relies on the `deepspeed_plugin.*` settings - if those were already parsed so that any `auto`s get replaced with the proper values then it'd have access to the correct setting, no? or is it still too early in the game - perhaps it's a matter of an order of execution?\r\n\r\nPerhaps there should be another wrapper that a user should call explicitly for deepspeed with args like bs early in the code, so that no `auto` values remain and then it'd be easy to rely on the actual values later on. All the values should be available early on. e.g. in HF Trainer we only had to wait for later for `num_training_steps`:\r\nhttps://github.com/huggingface/transformers/blob/0a144b8c6bd16f7b08119a233f8e7cbd33fe5bfd/src/transformers/deepspeed.py#L167\r\n\r\n> Would the alternative suggestion work?\r\n> AcceleratorState().deepspeed_plugin.deepspeed_config[\"train_micro_batch_size_per_gpu\"]=my_batch_size before calling accelerator.prepare()?\r\n\r\nThat would definitely work.\r\n\r\nMy first reaction is that suggestion could potentially be much more problematic should the user set the value in the ds config file and it might be an unexpected override (even though if written correctly it should be the same value). 
Somehow this feels like replacing one hack with another hack.\r\n\r\nI think the dummy dataset wrapped dataloader is a much cleaner way over the above, especially if the code isn't necessarily always using the deepspeed backend.\r\n\r\nIf this is the best that can be done, and there is no simpler way, let's just leave it as is.\r\n", "from_author": false }, { "body": "> Perhaps there should be another wrapper that a user should call explicitly for deepspeed with args like bs early in the code, so that no `auto` values remain and then it'd be easy to rely on the actual values later on\r\n\r\nThere is already a way to do this `deepspeed_plugin.deepspeed_config_process(**kwargs)` (Process the DeepSpeed config with the values from the kwargs.): https://github.com/huggingface/accelerate/blob/main/src/accelerate/utils/dataclasses.py#L417\r\nExample below:\r\n\r\n```\r\nif AcceleratorState().deepspeed_plugin is not None:\r\n kwargs = {\r\n \"fp16.enabled\": True,\r\n \"optimizer.params.lr\": 5e-5,\r\n \"optimizer.params.weight_decay\": 0.0,\r\n \"scheduler.params.warmup_min_lr\": 0.0,\r\n \"scheduler.params.warmup_max_lr\": 5e-5,\r\n \"scheduler.params.warmup_num_steps\": 0,\r\n \"train_micro_batch_size_per_gpu\": my_batch_size,\r\n \"gradient_clipping\": 1.0,\r\n } \r\n AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)\n\n# call `accelerator.prepare` below `_` are just dummy placeholders \r\n_, _, _ = accelerator.prepare(_, _, _)\r\n\r\n...\r\n\r\n```", "from_author": true }, { "body": "should `deepspeed_config_process` call that last `accelerator.prepare` command internally? This looks super-weird with `_` input and `_` output args, unless you meant something else there.", "from_author": false }, { "body": "Hello @stas00 , in the example above, it is the user code, I was just mentioning/showcasing that `deepspeed_config_process` should be called before `accelerator.prepare`. As mentioned, `deepspeed_config_process` only sets `auto` values from the given kwargs dict, doesn't internally call `accelerator.prepare` at all and those `_` were placeholders πŸ˜…. ", "from_author": true }, { "body": "Thank you for clarifying, @pacman100! It's crystal clear now.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/676", "pr_id": 1045415772 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 9b14aeaed..a174115b1 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -59,7 +59,7 @@ def get_cluster_input():\n lambda x: int(x),\n )\n same_network = _ask_field(\n- \"Are all the machines on the same network? [YES/no]: \",\n+ \"Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: \",\n _convert_yes_no_to_bool,\n default=True,\n error_message=\"Please enter yes or no.\",\n", "code_comments": [ { "body": "```suggestion\n \"Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on the different network hosts [YES/no]: \",\n```\n", "diff_hunk": "@@ -59,7 +59,7 @@ def get_cluster_input():\n lambda x: int(x),\n )\n same_network = _ask_field(\n- \"Are all the machines on the same network? [YES/no]: \",\n+ \"Are all the machines on the same local network? 
[YES/no]: \",", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/674", "pr_id": 1045254185 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 4bc977029..409399b17 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -21,9 +21,10 @@\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\n extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed<0.7.0\", \"tqdm\"]\n extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\n+extras[\"rich\"] = [\"rich\"]\n \n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\n-extras[\"dev\"] = extras[\"quality\"] + extras[\"testing\"]\n+extras[\"dev\"] = extras[\"quality\"] + extras[\"testing\"] + extras[\"rich\"]\n \n extras[\"sagemaker\"] = [\n \"sagemaker\", # boto3 is a required package in sagemaker\n@@ -50,7 +51,7 @@\n ]\n },\n python_requires=\">=3.7.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"rich\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\n extras_require=extras,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex 03a7198ac..10c258efa 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -16,7 +16,11 @@\n InitProcessGroupKwargs,\n find_executable_batch_size,\n infer_auto_device_map,\n+ is_rich_available,\n load_checkpoint_in_model,\n- rich,\n synchronize_rng_states,\n )\n+\n+\n+if is_rich_available():\n+ from .utils import rich\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 491ea5ad3..c7f235843 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -39,22 +39,25 @@\n _filter_args,\n get_launch_prefix,\n is_deepspeed_available,\n+ is_rich_available,\n is_sagemaker_available,\n is_torch_version,\n patch_environment,\n )\n from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\n from accelerate.utils.dataclasses import SageMakerDistributedType\n-from rich import get_console\n-from rich.logging import RichHandler\n \n \n-if is_torch_version(\">=\", \"1.9.0\"):\n- import torch.distributed.run as distrib_run\n+if is_rich_available():\n+ from rich import get_console\n+ from rich.logging import RichHandler\n+\n+ FORMAT = \"%(message)s\"\n+ logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\n \n \n-FORMAT = \"%(message)s\"\n-logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\n+if is_torch_version(\">=\", \"1.9.0\"):\n+ import torch.distributed.run as distrib_run\n \n logger = logging.getLogger(__name__)\n \ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 2b8f566f0..6ec2f4ac4 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -27,6 +27,7 @@\n is_comet_ml_available,\n is_datasets_available,\n is_deepspeed_available,\n+ is_rich_available,\n is_sagemaker_available,\n is_tensorboard_available,\n is_tpu_available,\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex 3efb61034..6015f2c14 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -111,6 
+111,10 @@ def is_boto3_available():\n return importlib.util.find_spec(\"boto3\") is not None\n \n \n+def is_rich_available():\n+ return importlib.util.find_spec(\"rich\") is not None\n+\n+\n def is_sagemaker_available():\n return importlib.util.find_spec(\"sagemaker\") is not None\n \ndiff --git a/src/accelerate/utils/rich.py b/src/accelerate/utils/rich.py\nindex 7a54c8b5a..2d48661b7 100644\n--- a/src/accelerate/utils/rich.py\n+++ b/src/accelerate/utils/rich.py\n@@ -12,7 +12,13 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from rich.traceback import install\n+from .imports import is_rich_available\n \n \n-install(show_locals=False)\n+if is_rich_available():\n+ from rich.traceback import install\n+\n+ install(show_locals=False)\n+\n+else:\n+ raise ModuleNotFoundError(\"To use the rich extension, install rich with `pip install rich`\")\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/673", "pr_id": 1045217690 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 65c0e56b2..9b14aeaed 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -38,6 +38,7 @@ def get_cluster_input():\n main_process_ip = None\n main_process_port = None\n rdzv_backend = \"static\"\n+ same_network = True\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\n num_machines = _ask_field(\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n@@ -57,9 +58,16 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n- rdzv_backend = _ask_field(\n- \"What rendezvous backend will you use? ('static', 'c10d', ...)\", default=\"static\"\n+ same_network = _ask_field(\n+ \"Are all the machines on the same network? [YES/no]: \",\n+ _convert_yes_no_to_bool,\n+ default=True,\n+ error_message=\"Please enter yes or no.\",\n )\n+ if not same_network:\n+ rdzv_backend = _ask_field(\n+ \"What rendezvous backend will you use? 
('static', 'c10d', ...): \", default=\"static\"\n+ )\n \n if distributed_type == DistributedType.NO:\n use_cpu = _ask_field(\n@@ -328,4 +336,5 @@ def get_cluster_input():\n fsdp_config=fsdp_config,\n use_cpu=use_cpu,\n rdzv_backend=rdzv_backend,\n+ same_network=same_network,\n )\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex e78aad181..68da64963 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -138,6 +138,7 @@ class ClusterConfig(BaseConfig):\n main_process_ip: Optional[str] = None\n main_process_port: Optional[int] = None\n rdzv_backend: Optional[str] = \"static\"\n+ same_network: Optional[bool] = False\n main_training_function: str = \"main\"\n \n # args for deepspeed_plugin\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex fcc80bb7f..491ea5ad3 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -375,15 +375,21 @@ def simple_launcher(args):\n def multi_gpu_launcher(args):\n num_processes = getattr(args, \"num_processes\")\n num_machines = getattr(args, \"num_machines\")\n+ main_process_ip = getattr(args, \"main_process_ip\")\n+ main_process_port = getattr(args, \"main_process_port\")\n if num_machines > 1:\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\n setattr(args, \"nnodes\", str(num_machines))\n- setattr(args, \"node_rank\", str(args.machine_rank))\n- setattr(args, \"rdzv_endpoint\", f\"{args.main_process_ip}:{args.main_process_port}\")\n+ setattr(args, \"node_rank\", int(args.machine_rank))\n+ if getattr(args, \"same_network\"):\n+ setattr(args, \"master_addr\", str(main_process_ip))\n+ setattr(args, \"master_port\", str(main_process_port))\n+ else:\n+ setattr(args, \"rdzv_endpoint\", f\"{main_process_ip}:{main_process_port}\")\n else:\n setattr(args, \"nproc_per_node\", str(num_processes))\n- if args.main_process_port is not None:\n- setattr(args, \"master_port\", str(args.main_process_port))\n+ if main_process_port is not None:\n+ setattr(args, \"master_port\", str(main_process_port))\n \n if args.module and args.no_python:\n raise ValueError(\"--module and --no_python cannot be used together\")\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/672", "pr_id": 1045197885 }, { "diff": "diff --git a/docs/source/usage_guides/big_modeling.mdx b/docs/source/usage_guides/big_modeling.mdx\nindex 9b3252809..1e13849c6 100644\n--- a/docs/source/usage_guides/big_modeling.mdx\n+++ b/docs/source/usage_guides/big_modeling.mdx\n@@ -35,7 +35,13 @@ While this works very well for regularly sized models, this workflow has some cl\n \n </Tip>\n \n-## Instantiating an empty model\n+## How the Process Works: A Quick Overview\n+\n+<Youtube id=\"MWCSGj9jEAo\" />\n+\n+## How the Process Works: Working with Code\n+\n+### Instantiating an empty model\n \n The first tool πŸ€— Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM, so that step 1 can be done on models of any size. Here is how it works:\n \n@@ -61,7 +67,7 @@ initializes an empty model with a bit more than 100B parameters. 
Behind the scen\n \n </Tip>\n \n-## Sharded checkpoints\n+### Sharded checkpoints\n \n It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, this is more memory available to store your model. In this case, it's better if your checkpoint is split in several smaller files that we call checkpoint shards.\n \n@@ -86,7 +92,7 @@ with index.json being the following file:\n \n and `first_state_dict.bin` containing the weights for `\"linear1.weight\"` and `\"linear1.bias\"`, `second_state_dict.bin` the ones for `\"linear2.weight\"` and `\"linear2.bias\"`\n \n-## Loading weights\n+### Loading weights\n \n The second tool πŸ€— Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.\n \n@@ -176,7 +182,7 @@ You can also design your `device_map` yourself, if you prefer to explicitly deci\n model = load_checkpoint_and_dispatch(model, \"sharded-gpt-j-6B\", device_map=my_device_map)\n ```\n \n-## Run the model\n+### Run the model\n \n Now that we have done this, our model lies across several devices, and maybe the hard drive. But it can still be used as a regular PyTorch model:\n \n@@ -203,7 +209,7 @@ This way, you model can run for inference even if it doesn't fit on one of the G\n \n </Tip>\n \n-## Designing a device map\n+### Designing a device map\n \n You can let πŸ€— Accelerate handle the device map computation by setting `device_map` to one of the supported options (`\"auto\"`, `\"balanced\"`, `\"balanced_low_0\"`, `\"sequential\"`) or create one yourself, if you want more control over where each layer should go.\n \ndiff --git a/manim_animations/big_model_inference/stage_1.py b/manim_animations/big_model_inference/stage_1.py\nnew file mode 100644\nindex 000000000..81ec0c965\n--- /dev/null\n+++ b/manim_animations/big_model_inference/stage_1.py\n@@ -0,0 +1,108 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from manim import *\n+\n+\n+class Stage1(Scene):\n+ def construct(self):\n+ mem = Rectangle(height=0.5,width=0.5)\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\n+\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\n+ cpu_text = Text(\"CPU\", font_size=24)\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ cpu.move_to([-2.5,-.5,0])\n+ self.add(cpu)\n+\n+ gpu_base = [mem.copy() for i in range(1)]\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\n+ gpu_text = Text(\"GPU\", font_size=24)\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ gpu.align_to(cpu, DOWN)\n+ gpu.set_x(gpu.get_x() - 1)\n+ \n+ self.add(gpu)\n+\n+ model_base = [mem.copy() for i in range(6)]\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\n+\n+ model_text = Text(\"Model\", font_size=24)\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ model.move_to([3, -1., 0])\n+ \n+ self.play(\n+ Create(cpu_left_col, run_time=1),\n+ Create(cpu_right_col, run_time=1),\n+ Create(gpu_rect, run_time=1),\n+ )\n+\n+ step_1 = MarkupText(\n+ f\"First, an empty model skeleton is loaded\\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.\", \n+ font_size=24\n+ )\n+\n+ key = Square(side_length=2.2)\n+ key.move_to([-5, 2, 0])\n+\n+ key_text = MarkupText(\n+ f\"<b>Key:</b>\\n\\n<span fgcolor='{YELLOW}'>●</span> Empty Model\",\n+ font_size=18,\n+ )\n+\n+ key_text.move_to([-5, 2.4, 0])\n+\n+\n+ step_1.move_to([2, 2, 0])\n+ self.play(\n+ Write(step_1, run_time=2.5),\n+ Write(key_text),\n+ Write(key)\n+ )\n+\n+ self.add(model)\n+ \n+\n+ cpu_targs = []\n+ first_animations = []\n+ second_animations = []\n+ for i,rect in enumerate(model_base):\n+\n+ cpu_target = Rectangle(height=0.46,width=0.46).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)\n+ cpu_target.move_to(rect)\n+ cpu_target.generate_target()\n+ cpu_target.target.height = 0.46/4\n+ cpu_target.target.width = 0.46/3\n+ \n+ if i == 0:\n+ cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)\n+ cpu_target.target.set_x(cpu_target.target.get_x()+0.1)\n+ elif i == 3:\n+ cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.)\n+ else:\n+ cpu_target.target.next_to(cpu_targs[i-1].target, direction=RIGHT, buff=0.)\n+ cpu_targs.append(cpu_target)\n+\n+ first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))\n+ second_animations.append(MoveToTarget(cpu_target, run_time=1.5))\n+\n+ self.play(*first_animations)\n+ self.play(*second_animations)\n+ \n+\n+ self.wait()\n\\ No newline at end of file\ndiff --git a/manim_animations/big_model_inference/stage_2.py 
b/manim_animations/big_model_inference/stage_2.py\nnew file mode 100644\nindex 000000000..a30e9593b\n--- /dev/null\n+++ b/manim_animations/big_model_inference/stage_2.py\n@@ -0,0 +1,126 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from manim import *\n+\n+class Stage2(Scene):\n+ def construct(self):\n+ mem = Rectangle(height=0.5,width=0.5)\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\n+\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\n+ cpu_text = Text(\"CPU\", font_size=24)\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ cpu.move_to([-2.5,-.5,0])\n+ self.add(cpu)\n+\n+ gpu_base = [mem.copy() for i in range(4)]\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\n+ gpu_text = Text(\"GPU\", font_size=24)\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ gpu.move_to([-1,-1,0])\n+ self.add(gpu)\n+\n+ model_base = [mem.copy() for i in range(6)]\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\n+\n+ model_text = Text(\"Model\", font_size=24)\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ model.move_to([3, -1., 0])\n+ self.add(model)\n+ \n+ cpu_targs = []\n+ for i,rect in enumerate(model_base):\n+ rect.set_stroke(YELLOW)\n+ # target = fill.copy().set_fill(YELLOW, opacity=0.7)\n+ # target.move_to(rect)\n+ # self.add(target)\n+\n+ cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)\n+ \n+ if i == 0:\n+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)\n+ cpu_target.set_x(cpu_target.get_x()+0.1)\n+ elif i == 3:\n+ cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.)\n+ else:\n+ cpu_target.next_to(cpu_targs[i-1], direction=RIGHT, buff=0.)\n+ self.add(cpu_target)\n+ cpu_targs.append(cpu_target)\n+\n+ \n+\n+ checkpoint_base = [mem.copy() for i in range(6)]\n+ checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0)\n+\n+ checkpoint_text = Text(\"Loaded Checkpoint\", font_size=24)\n+ checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)\n+ checkpoint.move_to([3, .5, 0])\n+ \n+ key = Square(side_length=2.2)\n+ key.move_to([-5, 2, 0])\n+\n+ key_text = MarkupText(\n+ f\"<b>Key:</b>\\n\\n<span fgcolor='{YELLOW}'>●</span> Empty Model\",\n+ font_size=18,\n+ )\n+\n+ key_text.move_to([-5, 2.4, 0])\n+\n+ self.add(key_text, key)\n+\n+ blue_text = MarkupText(\n+ f\"<span fgcolor='{BLUE}'>●</span> Checkpoint\",\n+ font_size=18,\n+ )\n+\n+ blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())\n+\n+ step_2 = MarkupText(\n+ f'Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is 
loaded into memory,\\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.', \n+ font_size=24\n+ )\n+ step_2.move_to([2, 2, 0])\n+ self.play(\n+ Write(step_2),\n+ Write(blue_text)\n+ )\n+\n+ self.play(\n+ Write(checkpoint_text, run_time=1),\n+ Create(checkpoint_rect, run_time=1)\n+ )\n+\n+ first_animations = []\n+ second_animations = []\n+ for i,rect in enumerate(checkpoint_base):\n+ target = fill.copy().set_fill(BLUE, opacity=0.7)\n+ target.move_to(rect)\n+ first_animations.append(GrowFromCenter(target, run_time=1))\n+\n+ cpu_target = target.copy()\n+ cpu_target.generate_target()\n+ if i < 5:\n+ cpu_target.target.move_to(cpu_left_col_base[i+1])\n+ else:\n+ cpu_target.target.move_to(cpu_right_col_base[i-5])\n+ second_animations.append(MoveToTarget(cpu_target, run_time=1.5))\n+ \n+ self.play(*first_animations)\n+ self.play(*second_animations)\n+ self.wait()\n\\ No newline at end of file\ndiff --git a/manim_animations/big_model_inference/stage_3.py b/manim_animations/big_model_inference/stage_3.py\nnew file mode 100644\nindex 000000000..4ba20c4b5\n--- /dev/null\n+++ b/manim_animations/big_model_inference/stage_3.py\n@@ -0,0 +1,158 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from manim import *\n+\n+class Stage3(Scene):\n+ def construct(self):\n+ mem = Rectangle(height=0.5,width=0.5)\n+ meta_mem = Rectangle(height=0.25,width=0.25)\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\n+\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\n+ cpu_text = Text(\"CPU\", font_size=24)\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ cpu.move_to([-2.5,-.5,0])\n+ self.add(cpu)\n+\n+ gpu_base = [mem.copy() for i in range(4)]\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\n+ gpu_text = Text(\"GPU\", font_size=24)\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ gpu.move_to([-1,-1,0])\n+ self.add(gpu)\n+\n+ model_base = [mem.copy() for i in range(6)]\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\n+\n+ model_text = Text(\"Model\", font_size=24)\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ model.move_to([3, -1., 0])\n+ self.add(model)\n+\n+ model_arr = []\n+ model_cpu_arr = []\n+ model_meta_arr = []\n+ \n+ for i,rect in enumerate(model_base):\n+ rect.set_stroke(YELLOW)\n+\n+ cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)\n+ \n+ if i == 0:\n+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)\n+ cpu_target.set_x(cpu_target.get_x()+0.1)\n+ elif i == 3:\n+ cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.)\n+ else:\n+ 
cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.)\n+ self.add(cpu_target)\n+ model_cpu_arr.append(cpu_target)\n+\n+ self.add(*model_arr, *model_cpu_arr, *model_meta_arr)\n+\n+ checkpoint_base = [mem.copy() for i in range(6)]\n+ checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0)\n+\n+ checkpoint_text = Text(\"Loaded Checkpoint\", font_size=24)\n+ checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ checkpoint.move_to([3, .5, 0])\n+ \n+ self.add(checkpoint)\n+\n+ ckpt_arr = []\n+ ckpt_cpu_arr = []\n+\n+ for i,rect in enumerate(checkpoint_base):\n+ target = fill.copy().set_fill(BLUE, opacity=0.7)\n+ target.move_to(rect)\n+ ckpt_arr.append(target)\n+\n+ cpu_target = target.copy()\n+ if i < 5:\n+ cpu_target.move_to(cpu_left_col_base[i+1])\n+ else:\n+ cpu_target.move_to(cpu_right_col_base[i-5])\n+ ckpt_cpu_arr.append(cpu_target)\n+ self.add(*ckpt_arr, *ckpt_cpu_arr)\n+\n+ key = Square(side_length=2.2)\n+ key.move_to([-5, 2, 0])\n+\n+ key_text = MarkupText(\n+ f\"<b>Key:</b>\\n\\n<span fgcolor='{YELLOW}'>●</span> Empty Model\",\n+ font_size=18,\n+ )\n+\n+ key_text.move_to([-5, 2.4, 0])\n+\n+ self.add(key_text, key)\n+\n+ blue_text = MarkupText(\n+ f\"<span fgcolor='{BLUE}'>●</span> Checkpoint\",\n+ font_size=18,\n+ )\n+\n+ blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())\n+ self.add(blue_text)\n+\n+ step_3 = MarkupText(\n+ f'Based on the passed in configuration, weights are stored in\\na variety of np.memmaps on disk or to a particular device.', \n+ font_size=24\n+ )\n+ step_3.move_to([2, 2, 0])\n+\n+ disk_left_col_base = [meta_mem.copy() for i in range(6)]\n+ disk_right_col_base = [meta_mem.copy() for i in range(6)]\n+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)\n+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)\n+ disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0)\n+ disk_text = Text(\"Disk\", font_size=24)\n+ disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ disk.move_to([-4.,-1.25,0])\n+ self.play(\n+ Write(step_3, run_time=3),\n+ Write(disk_text, run_time=1),\n+ Create(disk_rects, run_time=1)\n+ )\n+\n+ animations = []\n+ for i,rect in enumerate(ckpt_cpu_arr):\n+ target = rect.copy()\n+ target.generate_target()\n+ target.target.move_to(disk_left_col_base[i]).scale(0.5)\n+ animations.append(MoveToTarget(target, run_time=1.5))\n+ self.play(*animations)\n+\n+ self.play(FadeOut(step_3))\n+\n+ step_4 = MarkupText(\n+ f'Then, the checkpoint is removed from memory\\nthrough garbage collection.', \n+ font_size=24\n+ )\n+ step_4.move_to([2, 2, 0])\n+\n+ self.play(\n+ Write(step_4, run_time=3)\n+ )\n+\n+ self.play(\n+ FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),\n+ )\n+\n+ self.wait() \n\\ No newline at end of file\ndiff --git a/manim_animations/big_model_inference/stage_4.py b/manim_animations/big_model_inference/stage_4.py\nnew file mode 100644\nindex 000000000..3a79ad97c\n--- /dev/null\n+++ b/manim_animations/big_model_inference/stage_4.py\n@@ -0,0 +1,156 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from manim import *\n+\n+class Stage4(Scene):\n+ def construct(self):\n+ mem = Rectangle(height=0.5,width=0.5)\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\n+ meta_mem = Rectangle(height=0.25,width=0.25)\n+\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\n+ cpu_text = Text(\"CPU\", font_size=24)\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ cpu.move_to([-2.5,-.5,0])\n+ self.add(cpu)\n+\n+ gpu_base = [mem.copy() for i in range(4)]\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\n+ gpu_text = Text(\"GPU\", font_size=24)\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ gpu.move_to([-1,-1,0])\n+ self.add(gpu)\n+\n+ model_base = [mem.copy() for i in range(6)]\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\n+\n+ model_text = Text(\"Model\", font_size=24)\n+ model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ model.move_to([3, -1., 0])\n+ self.add(model)\n+\n+ model_cpu_arr = []\n+ model_meta_arr = []\n+ \n+ for i,rect in enumerate(model_base):\n+ rect.set_stroke(YELLOW)\n+\n+ cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)\n+ \n+ if i == 0:\n+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)\n+ cpu_target.set_x(cpu_target.get_x()+0.1)\n+ elif i == 3:\n+ cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.)\n+ else:\n+ cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.)\n+ self.add(cpu_target)\n+ model_cpu_arr.append(cpu_target)\n+\n+ self.add(*model_cpu_arr, *model_meta_arr)\n+\n+ disk_left_col_base = [meta_mem.copy() for i in range(6)]\n+ disk_right_col_base = [meta_mem.copy() for i in range(6)]\n+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)\n+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)\n+ disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0)\n+ disk_text = Text(\"Disk\", font_size=24)\n+ disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ disk.move_to([-4.,-1.25,0])\n+ self.add(disk_text, disk_rects)\n+\n+ cpu_disk_arr = []\n+\n+ for i in range(6):\n+ target = fill.copy().set_fill(BLUE, opacity=0.8)\n+ target.move_to(disk_left_col_base[i]).scale(0.5)\n+ cpu_disk_arr.append(target)\n+\n+ self.add(*cpu_disk_arr)\n+\n+ key = Square(side_length=2.2)\n+ key.move_to([-5, 2, 0])\n+\n+ key_text = MarkupText(\n+ f\"<b>Key:</b>\\n\\n<span fgcolor='{YELLOW}'>●</span> Empty Model\",\n+ font_size=18,\n+ )\n+\n+ key_text.move_to([-5, 2.4, 0])\n+\n+ self.add(key_text, key)\n+\n+ blue_text = MarkupText(\n+ f\"<span 
fgcolor='{BLUE}'>●</span> Checkpoint\",\n+ font_size=18,\n+ )\n+\n+ blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())\n+ self.add(blue_text)\n+\n+ step_5 = MarkupText(\n+ f'The offloaded weights are all sent to the CPU.', \n+ font_size=24\n+ )\n+ step_5.move_to([2, 2, 0])\n+\n+ self.play(Write(step_5, run_time=3))\n+\n+ for i in range(6):\n+ rect = cpu_disk_arr[i]\n+ cp2 = rect.copy().set_fill(BLUE, opacity=0.8).scale(2.0)\n+ cp2.generate_target()\n+ cp2.target.move_to(model_base[i])\n+\n+ if i == 0:\n+ rect.set_fill(BLUE, opacity=0.8)\n+ rect.generate_target()\n+ rect.target.move_to(cpu_left_col_base[0]).scale(2.0)\n+ \n+ self.remove(*model_meta_arr, \n+ *model_cpu_arr,\n+ )\n+\n+ else:\n+ rect.generate_target()\n+ rect.target.move_to(cpu_left_col_base[i]).scale(2.0)\n+ self.play(\n+ MoveToTarget(rect),\n+ MoveToTarget(cp2),\n+ model_base[i].animate.set_stroke(WHITE)\n+ )\n+ self.play(FadeOut(step_5))\n+\n+ step_5 = MarkupText(\n+ f'Finally, hooks are added to each weight in the model\\nto transfer the weights from CPU to GPU\\n\\t\\tand back when needed.', \n+ font_size=24\n+ )\n+ step_5.move_to([2, 2, 0])\n+\n+ self.play(Write(step_5, run_time=3))\n+\n+ arrows = []\n+ animations = []\n+ for i in range(6):\n+ a = Arrow(start=UP, end=DOWN, color=RED, buff=.5)\n+ a.next_to(model_base[i].get_left(), UP, buff=0.2)\n+ arrows.append(a)\n+ animations.append(Write(a))\n+ self.play(*animations)\n+ self.wait() \n\\ No newline at end of file\ndiff --git a/manim_animations/big_model_inference/stage_5.py b/manim_animations/big_model_inference/stage_5.py\nnew file mode 100644\nindex 000000000..8b2ff3357\n--- /dev/null\n+++ b/manim_animations/big_model_inference/stage_5.py\n@@ -0,0 +1,221 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from manim import *\n+\n+class Stage5(Scene):\n+ def construct(self):\n+ mem = Rectangle(height=0.5,width=0.5)\n+ fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)\n+\n+ meta_mem = Rectangle(height=0.25,width=0.25)\n+\n+ cpu_left_col_base = [mem.copy() for i in range(6)]\n+ cpu_right_col_base = [mem.copy() for i in range(6)]\n+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)\n+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)\n+ cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)\n+ cpu_text = Text(\"CPU\", font_size=24)\n+ cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ cpu.move_to([-2.5,-.5,0])\n+ self.add(cpu)\n+\n+ gpu_base = [mem.copy() for i in range(4)]\n+ gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)\n+ gpu_text = Text(\"GPU\", font_size=24)\n+ gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ gpu.move_to([-1,-1,0])\n+ self.add(gpu)\n+\n+ model_base = [mem.copy() for i in range(6)]\n+ model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)\n+\n+ model_text = Text(\"Model\", font_size=24)\n+ model = Group(model_rect,model_text).arrange(DOWN, 
buff=0.5, aligned_edge=DOWN)\n+ model.move_to([3, -1., 0])\n+ self.add(model)\n+\n+ model_arr = []\n+ model_cpu_arr = []\n+ \n+ for i,rect in enumerate(model_base):\n+ target = fill.copy().set_fill(BLUE, opacity=0.8)\n+ target.move_to(rect)\n+ model_arr.append(target)\n+\n+ cpu_target = Rectangle(height=0.46,width=0.46).set_stroke(width=0.).set_fill(BLUE, opacity=0.8)\n+ cpu_target.move_to(cpu_left_col_base[i])\n+ model_cpu_arr.append(cpu_target)\n+\n+ self.add(*model_arr, *model_cpu_arr)\n+\n+ disk_left_col_base = [meta_mem.copy() for i in range(6)]\n+ disk_right_col_base = [meta_mem.copy() for i in range(6)]\n+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)\n+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)\n+ disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0)\n+ disk_text = Text(\"Disk\", font_size=24)\n+ disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)\n+ disk.move_to([-4,-1.25,0])\n+ self.add(disk_text, disk_rects)\n+\n+ key = Square(side_length=2.2)\n+ key.move_to([-5, 2, 0])\n+\n+ key_text = MarkupText(\n+ f\"<b>Key:</b>\\n\\n<span fgcolor='{YELLOW}'>●</span> Empty Model\",\n+ font_size=18,\n+ )\n+\n+ key_text.move_to([-5, 2.4, 0])\n+\n+ self.add(key_text, key)\n+\n+ blue_text = MarkupText(\n+ f\"<span fgcolor='{BLUE}'>●</span> Checkpoint\",\n+ font_size=18,\n+ )\n+\n+ blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())\n+ self.add(blue_text)\n+\n+ step_6 = MarkupText(\n+ f'Now watch as an input is passed through the model\\nand how the memory is utilized and handled.', \n+ font_size=24\n+ )\n+ step_6.move_to([2, 2, 0])\n+\n+ self.play(Write(step_6))\n+\n+ input = Square(0.3)\n+ input.set_fill(RED, opacity=1.)\n+ input.set_stroke(width=0.)\n+ input.next_to(model_base[0], LEFT, buff=.5)\n+\n+ self.play(Write(input))\n+\n+ input.generate_target()\n+ input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)\n+ self.play(MoveToTarget(input))\n+\n+ self.play(FadeOut(step_6))\n+\n+\n+ a = Arrow(start=UP, end=DOWN, color=RED, buff=.5)\n+ a.next_to(model_arr[0].get_left(), UP, buff=0.2)\n+\n+ model_cpu_arr[0].generate_target()\n+ model_cpu_arr[0].target.move_to(gpu_rect[0])\n+\n+ step_7 = MarkupText(\n+ f'As the input reaches a layer, the hook triggers\\nand weights are moved from the CPU\\nto the GPU and back.', \n+ font_size=24\n+ )\n+ step_7.move_to([2, 2, 0])\n+\n+ self.play(Write(step_7, run_time=3))\n+\n+ circ_kwargs = {\"run_time\":1, \"fade_in\":True, \"fade_out\":True, \"buff\":0.02}\n+\n+ self.play(\n+ Write(a), \n+ Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),\n+ Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),\n+ Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),\n+ )\n+ self.play(\n+ MoveToTarget(model_cpu_arr[0])\n+ )\n+\n+ a_c = a.copy()\n+ for i in range(6):\n+ a_c.next_to(model_arr[i].get_right()+0.02, UP, buff=0.2)\n+\n+ input.generate_target()\n+ input.target.move_to(model_arr[i].get_right()+0.02)\n+\n+ grp = AnimationGroup(\n+ FadeOut(a, run_time=.5), \n+ MoveToTarget(input, run_time=.5), \n+ FadeIn(a_c, run_time=.5),\n+ lag_ratio=0.2\n+ )\n+\n+ self.play(grp)\n+\n+\n+ model_cpu_arr[i].generate_target()\n+ model_cpu_arr[i].target.move_to(cpu_left_col_base[i])\n+\n+\n+ if i < 5:\n+ model_cpu_arr[i+1].generate_target()\n+ model_cpu_arr[i+1].target.move_to(gpu_rect[0])\n+ if i >= 1:\n+ circ_kwargs[\"run_time\"] = .7\n+\n+ self.play(\n+ Circumscribe(model_arr[i], **circ_kwargs),\n+ Circumscribe(cpu_left_col_base[i], **circ_kwargs),\n+ 
Circumscribe(cpu_left_col_base[i+1], color=ORANGE, **circ_kwargs), \n+ Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),\n+ Circumscribe(model_arr[i+1], color=ORANGE, **circ_kwargs),\n+ )\n+ if i < 1:\n+ self.play(\n+ MoveToTarget(model_cpu_arr[i]), \n+ MoveToTarget(model_cpu_arr[i+1]),\n+ )\n+ else:\n+ self.play(\n+ MoveToTarget(model_cpu_arr[i], run_time=.7), \n+ MoveToTarget(model_cpu_arr[i+1], run_time=.7),\n+ )\n+ else:\n+ model_cpu_arr[i].generate_target()\n+ model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])\n+ input.generate_target()\n+ input.target.next_to(model_arr[-1].get_right(), RIGHT+0.02, buff=0.2)\n+\n+ self.play(\n+ Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),\n+ Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),\n+ Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),\n+ )\n+\n+ self.play(\n+ MoveToTarget(model_cpu_arr[i])\n+ )\n+\n+ a = a_c\n+ a_c = a_c.copy()\n+\n+ input.generate_target()\n+ input.target.next_to(model_base[-1], RIGHT+0.02, buff=.5)\n+ self.play(\n+ FadeOut(step_7),\n+ FadeOut(a, run_time=.5), \n+ )\n+\n+ step_8 = MarkupText(\n+ f'Inference on a model too large for GPU memory\\nis successfully completed.', font_size=24\n+ )\n+ step_8.move_to([2, 2, 0])\n+\n+ self.play(\n+ Write(step_8, run_time=3),\n+ MoveToTarget(input)\n+ )\n+\n+ self.wait()\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/671", "pr_id": 1044967618 }, { "diff": "diff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex 074d02e4a..3efb61034 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -14,6 +14,7 @@\n \n import importlib\n import sys\n+from functools import lru_cache\n \n import torch\n \n@@ -50,6 +51,7 @@ def is_apex_available():\n return importlib.util.find_spec(\"apex\") is not None\n \n \n+@lru_cache()\n def is_tpu_available(check_device=True):\n \"Checks if `torch_xla` is installed and potentially if a TPU is in the environment\"\n if _tpu_available and check_device:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/670", "pr_id": 1043736990 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 8160cb4ee..6b9825523 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -35,6 +35,8 @@\n title: Using πŸ€— Accelerate on SageMaker\n - local: usage_guides/mps\n title: How to use Apple Silicon M1 GPUs\n+ - local: usage_guides/megatron_lm\n+ title: How to use Megatron-LM\n - local: usage_guides/training_zoo\n title: πŸ€— Accelerate Example Zoo\n title: How-To Guides\n@@ -71,4 +73,6 @@\n title: Kwargs handlers\n - local: package_reference/utilities\n title: Utility functions and classes\n+ - local: package_reference/megatron_lm\n+ title: Megatron-LM Utilities\n title: \"Reference\"\n\\ No newline at end of file\ndiff --git a/docs/source/package_reference/megatron_lm.mdx b/docs/source/package_reference/megatron_lm.mdx\nnew file mode 100644\nindex 000000000..b59768bec\n--- /dev/null\n+++ b/docs/source/package_reference/megatron_lm.mdx\n@@ -0,0 +1,29 @@\n+<!--Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Utilities for Megatron-LM\n+\n+[[autodoc]] utils.MegatronLMPlugin\n+\n+[[autodoc]] utils.MegatronLMDummyScheduler\n+\n+[[autodoc]] utils.MegatronLMDummyDataLoader\n+\n+[[autodoc]] utils.AbstractTrainStep\n+\n+[[autodoc]] utils.GPTTrainStep\n+\n+[[autodoc]] utils.BertTrainStep\n+\n+[[autodoc]] utils.T5TrainStep\n+\n+[[autodoc]] utils.avg_losses_across_data_parallel_group\ndiff --git a/docs/source/usage_guides/megatron_lm.mdx b/docs/source/usage_guides/megatron_lm.mdx\nnew file mode 100644\nindex 000000000..188dddb32\n--- /dev/null\n+++ b/docs/source/usage_guides/megatron_lm.mdx\n@@ -0,0 +1,507 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+\n+# Megatron-LM\n+\n+[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.\n+It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based\n+Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).\n+For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM).\n+\n+## What is integrated?\n+\n+Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\n+of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\n+\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\n+\n+b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \n+Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \n+Layers are distributed uniformly across PP stages.\n+\n+c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\n+\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.\n+\n+e. 
**Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing.\n+It doesn't store activations occupying large memory while being fast to recompute thereby achieving great tradeoff between memory and recomputation.\n+\n+f. **Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer.\n+PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition.\n+\n+g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format.\n+\n+h. **Checkpoint reshaping and interoperability**: Utility for reshaping Megatron-LM checkpoints of variable \n+tensor and pipeline parallel sizes to the beloved πŸ€— Transformers sharded checkpoints as it has great support with plethora of tools\n+such as πŸ€— Accelerate Big Model Inference, Megatron-DeepSpeed Inference etc. \n+Support is also available for converting πŸ€— Transformers sharded checkpoints to Megatron-LM checkpoint of variable tensor and pipeline parallel sizes\n+for large scale training. \n+\n+\n+## Pre-Requisites \n+\n+You will need to install the latest pytorch, cuda, nccl, and NVIDIA [APEX](https://github.com/NVIDIA/apex#quick-start) releases and the nltk library.\n+See [documentation](https://github.com/NVIDIA/Megatron-LM#setup) for more details. \n+Another way to setup the environment is to pull an NVIDIA PyTorch Container that comes with all the required installations from NGC.\n+\n+Below is a step-by-step method to set up the conda environment:\n+\n+1. Create a virtual environment\n+```\n+conda create --name ml\n+```\n+\n+2. Assuming that the machine has CUDA 11.3 installed, installing the corresponding PyTorch GPU Version\n+```\n+conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch\n+```\n+\n+3. Install Nvidia APEX\n+```\n+git clone https://github.com/NVIDIA/apex\n+cd apex\n+pip install -v --disable-pip-version-check --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./\n+cd ..\n+```\n+\n+4. Installing Megatron-LM\n+\n+ a. Cloning the Megatron-LM repo\n+ ```\n+ git clone https://github.com/NVIDIA/Megatron-LM.git\n+ cd Megatron-LM\n+ ```\n+\n+ b. 
Create a file `setup.py`, paste the below code and put in the root folder\n+ ```python\n+ \"\"\"Setup for pip package.\"\"\"\n+\n+ import os\n+ import sys\n+ import setuptools\n+\n+ if sys.version_info < (3,):\n+ raise Exception(\"Python 2 is not supported by Megatron.\")\n+\n+ with open(\"README.md\", \"r\") as fh:\n+ long_description = fh.read()\n+\n+ setuptools.setup(\n+ name=\"megatron-lm\",\n+ version=\"3.0.0\",\n+ description=\"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism.\",\n+ long_description=long_description,\n+ long_description_content_type=\"text/markdown\",\n+ # The project's main homepage.\n+ url=\"https://github.com/NVIDIA/Megatron-LM\",\n+ author=\"NVIDIA INC\",\n+ maintainer=\"NVIDIA INC\",\n+ # The licence under which the project is released\n+ license=\"See https://github.com/NVIDIA/Megatron-LM/blob/master/LICENSE\",\n+ classifiers=[\n+ \"Intended Audience :: Developers\",\n+ \"Intended Audience :: Science/Research\",\n+ \"Intended Audience :: Information Technology\",\n+ # Indicate what your project relates to\n+ \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n+ \"Topic :: Software Development :: Libraries :: Python Modules\",\n+ # Additional Setting\n+ \"Environment :: Console\",\n+ \"Natural Language :: English\",\n+ \"Operating System :: OS Independent\",\n+ ],\n+ python_requires=\">=3.6\",\n+ packages=setuptools.find_packages(),\n+ install_requires=[\"nltk\", \"six\", \"regex\", \"torch>=1.12.0\", \"pybind11\"],\n+ # Add in any packaged data.\n+ include_package_data=True,\n+ zip_safe=False,\n+ # PyPI package information.\n+ keywords=\"deep learning, Megatron, gpu, NLP, nvidia, pytorch, torch, language\",\n+ )\n+ ```\n+\n+ c. installing via below command\n+ ```\n+ pip install \".\"\n+ ```\n+\n+## Accelerate Megatron-LM Plugin\n+\n+Important features are directly supported via the `accelerate config` command. \n+An example of thr corresponding questions for using Megatron-LM features is shown below:\n+\n+```bash\n+:~$ accelerate config --config_file \"megatron_gpt_config.yaml\"\n+In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0\n+Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU [4] MPS): 2\n+How many different machines will you use (use more than 1 for multi-node training)? [1]: \n+Do you want to use DeepSpeed? [yes/NO]: \n+Do you want to use FullyShardedDataParallel? [yes/NO]: \n+Do you want to use Megatron-LM ? [yes/NO]: yes\n+What is the Tensor Parallelism degree/size? [1]:2\n+Do you want to enable Sequence Parallelism? [YES/no]: \n+What is the Pipeline Parallelism degree/size? [1]:2\n+What is the number of micro-batches? [1]:2\n+Do you want to enable selective activation recomputation? [YES/no]: \n+Do you want to use distributed optimizer which shards optimizer state and gradients across data pralellel ranks? [YES/no]: \n+What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: \n+How many GPU(s) should be used for distributed training? [1]:8\n+Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: bf16\n+```\n+\n+The resulting config is shown below:\n+\n+```\n+~$ cat megatron_gpt_config.yaml \n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MEGATRON_LM\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+megatron_lm_config:\n+ megatron_lm_gradient_clipping: 1.0\n+ megatron_lm_num_micro_batches: 2\n+ megatron_lm_pp_degree: 2\n+ megatron_lm_recompute_activations: true\n+ megatron_lm_sequence_parallelism: true\n+ megatron_lm_tp_degree: 2\n+ megatron_lm_use_distributed_optimizer: true\n+mixed_precision: bf16\n+num_machines: 1\n+num_processes: 8\n+rdzv_backend: static\n+same_network: true\n+use_cpu: false\n+```\n+\n+We will take the example of GPT pre-training. The minimal changes required to the official `run_clm_no_trainer.py` \n+to use Megatron-LM are as follows:\n+\n+1. As Megatron-LM uses its own implementation of Optimizer, the corresponding scheduler compatible with it needs to be used.\n+As such, support for only the Megatron-LM's scheduler is present. User will need to create `accelerate.utils.MegatronLMDummyScheduler`.\n+Example is given below:\n+\n+```python\n+from accelerate.utils import MegatronLMDummyScheduler\n+\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ lr_scheduler = MegatronLMDummyScheduler(\n+ optimizer=optimizer,\n+ total_num_steps=args.max_train_steps,\n+ warmup_num_steps=args.num_warmup_steps,\n+ )\n+else:\n+ lr_scheduler = get_scheduler(\n+ name=args.lr_scheduler_type,\n+ optimizer=optimizer,\n+ num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,\n+ num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,\n+ )\n+```\n+\n+2. Getting the details of the total batch size now needs to be cognization of tensor and pipeline parallel sizes.\n+Example of getting the effective total batch size is shown below:\n+\n+```python\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size\n+else:\n+ total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n+```\n+\n+3. When using Megatron-LM, the losses are already averaged across the data parallel group\n+\n+```python\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ losses.append(loss)\n+else:\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\n+\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ losses = torch.tensor(losses)\n+else:\n+ losses = torch.cat(losses)\n+```\n+\n+4. For Megatron-LM, we need to save the model using `accelerator.save_state`\n+\n+```python\n+if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ accelerator.save_state(args.output_dir)\n+else:\n+ unwrapped_model = accelerator.unwrap_model(model)\n+ unwrapped_model.save_pretrained(\n+ args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save\n+ )\n+```\n+\n+That's it! We are good to go πŸš€. 
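For quick reference, the following condensed sketch (an editorial addition, not part of the original example) shows where the four Megatron-LM-specific branches above slot into the training script. It assumes `accelerator`, `args`, `model`, `optimizer` and the dataloaders are already set up as in `run_clm_no_trainer.py`, and it only restates the snippets shown above.

```python
from accelerate import DistributedType
from accelerate.utils import MegatronLMDummyScheduler

# 1. Scheduler: Megatron-LM uses its own optimizer implementation, so the dummy scheduler wrapper is required.
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
    lr_scheduler = MegatronLMDummyScheduler(
        optimizer=optimizer,
        total_num_steps=args.max_train_steps,
        warmup_num_steps=args.num_warmup_steps,
    )

model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)

# 2. The effective total batch size comes from the plugin (it accounts for TP/PP/DP).
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
    total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size

# 3. During evaluation, losses returned by the model are already averaged across
#    the data parallel group, so they can be collected as-is.

# 4. Saving: use `accelerator.save_state` instead of `save_pretrained`.
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
    accelerator.save_state(args.output_dir)
```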
Please find the example script in the examples folder at the path `accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py`.\n+Let's run it for `gpt-large` model architecture using 4 A100-80GB GPUs.\n+\n+```bash\n+accelerate launch --config_file megatron_gpt_config.yaml \\\n+examples/by_feature/megatron_lm_gpt_pretraining.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name wikitext \\\n+--dataset_config_name wikitext-2-raw-v1 \\\n+--block_size 1024 \\\n+--learning_rate 5e-5 \\\n+--per_device_train_batch_size 4 \\\n+--per_device_eval_batch_size 4 \\\n+--num_train_epochs 5 \\\n+--with_tracking \\\n+--report_to \"wandb\" \\\n+--output_dir \"awesome_model\"\n+```\n+\n+Below are some important excerpts from the output logs:\n+\n+```bash\n+Loading extension module fused_dense_cuda...\n+>>> done with compiling and loading fused kernels. Compilation time: 3.569 seconds\n+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)\n+Building gpt model in the pre-training mode.\n+The Megatron LM model weights are initialized at random in `accelerator.prepare`. Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup.\n+Preparing dataloader\n+Preparing dataloader\n+Preparing model\n+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 210753280\n+ > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 209445120\n+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 210753280\n+ > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 209445120\n+Preparing optimizer\n+Preparing scheduler\n+> learning rate decay style: linear\n+10/10/2022 22:57:22 - INFO - __main__ - ***** Running training *****\n+10/10/2022 22:57:22 - INFO - __main__ - Num examples = 2318\n+10/10/2022 22:57:22 - INFO - __main__ - Num Epochs = 5\n+10/10/2022 22:57:22 - INFO - __main__ - Instantaneous batch size per device = 24\n+10/10/2022 22:57:22 - INFO - __main__ - Total train batch size (w. 
parallel, distributed & accumulation) = 48\n+10/10/2022 22:57:22 - INFO - __main__ - Gradient Accumulation steps = 1\n+10/10/2022 22:57:22 - INFO - __main__ - Total optimization steps = 245\n+ 20%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 49/245 [01:04<04:09, 1.27s/it]\n+ 10/10/2022 22:58:29 - INFO - __main__ - epoch 0: perplexity: 1222.1594275215962 eval_loss: 7.10837459564209\n+ 40%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 98/245 [02:10<03:07, 1.28s/it]\n+ 10/10/2022 22:59:35 - INFO - __main__ - epoch 1: perplexity: 894.5236583794557 eval_loss: 6.796291351318359\n+ 60%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 147/245 [03:16<02:05, 1.28s/it]\n+ 10/10/2022 23:00:40 - INFO - __main__ - epoch 2: perplexity: 702.8458788508042 eval_loss: 6.555137634277344\n+ 80%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 196/245 [04:22<01:02, 1.28s/it]\n+ 10/10/2022 23:01:46 - INFO - __main__ - epoch 3: perplexity: 600.3220028695281 eval_loss: 6.39746618270874\n+100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 245/245 [05:27<00:00, 1.28s/it]\n+```\n+\n+There are a large number of other options/features that one can set using `accelerate.utils.MegatronLMPlugin`.\n+\n+## Advanced features to leverage writing custom train step and Megatron-LM Indexed Datasets\n+\n+For leveraging more features, please go through below details.\n+\n+1. Below is an example of changes required to customize the Train Step while using Megatron-LM. 
\n+You will implement the `accelerate.utils.AbstractTrainStep` or inherit from its corresponding children \n+`accelerate.utils.GPTTrainStep`, `accelerate.utils.BertTrainStep` or `accelerate.utils.T5TrainStep`.\n+\n+```python\n+from accelerate.utils import MegatronLMDummyScheduler, GPTTrainStep, avg_losses_across_data_parallel_group\n+\n+# Custom loss function for the Megatron model\n+class GPTTrainStepWithCustomLoss(GPTTrainStep):\n+ def __init__(self, megatron_args, **kwargs):\n+ super().__init__(megatron_args)\n+ self.kwargs = kwargs\n+\n+ def get_loss_func(self):\n+ def loss_func(inputs, loss_mask, output_tensor):\n+ batch_size, seq_length = output_tensor.shape\n+ losses = output_tensor.float()\n+ loss_mask = loss_mask.view(-1).float()\n+ loss = losses.view(-1) * loss_mask\n+\n+ # Resize and average loss per sample\n+ loss_per_sample = loss.view(batch_size, seq_length).sum(axis=1)\n+ loss_mask_per_sample = loss_mask.view(batch_size, seq_length).sum(axis=1)\n+ loss_per_sample = loss_per_sample / loss_mask_per_sample\n+\n+ # Calculate and scale weighting\n+ weights = torch.stack([(inputs == kt).float() for kt in self.kwargs[\"keytoken_ids\"]]).sum(axis=[0, 2])\n+ weights = 1.0 + self.kwargs[\"alpha\"] * weights\n+ # Calculate weighted average\n+ weighted_loss = (loss_per_sample * weights).mean()\n+\n+ # Reduce loss across data parallel groups\n+ averaged_loss = avg_losses_across_data_parallel_group([weighted_loss])\n+\n+ return weighted_loss, {\"lm loss\": averaged_loss[0]}\n+\n+ return loss_func\n+\n+ def get_forward_step_func(self):\n+ def forward_step(data_iterator, model):\n+ \"\"\"Forward step.\"\"\"\n+ # Get the batch.\n+ tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)\n+ output_tensor = model(tokens, position_ids, attention_mask, labels=labels)\n+\n+ return output_tensor, partial(self.loss_func, tokens, loss_mask)\n+\n+ return forward_step\n+\n+\n+def main():\n+ # Custom loss function for the Megatron model\n+ keytoken_ids = []\n+ keywords = [\"plt\", \"pd\", \"sk\", \"fit\", \"predict\", \" plt\", \" pd\", \" sk\", \" fit\", \" predict\"]\n+ for keyword in keywords:\n+ ids = tokenizer([keyword]).input_ids[0]\n+ if len(ids) == 1:\n+ keytoken_ids.append(ids[0])\n+ accelerator.print(f\"Keytoken ids: {keytoken_ids}\")\n+ accelerator.state.megatron_lm_plugin.custom_train_step_class = GPTTrainStepWithCustomLoss\n+ accelerator.state.megatron_lm_plugin.custom_train_step_kwargs = {\n+ \"keytoken_ids\": keytoken_ids,\n+ \"alpha\": 0.25,\n+ }\n+```\n+\n+2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets\n+are available only on rank 0 of each tensor parallel group. As such, there are ranks where the dataloader won't be\n+available, and this requires tweaks to the training loop. Being able to do all this shows how\n+flexible and extensible πŸ€— Accelerate is. The changes required are as follows.\n+\n+a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader` \n+and pass the required dataset args to it such as `data_path`, `seq_length` etc. \n+See [here](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/arguments.py#L804) for the list of available args. 
\n+ \n+```python\n+from accelerate.utils import MegatronLMDummyDataLoader\n+\n+megatron_dataloader_config = {\n+ \"data_path\": args.data_path,\n+ \"splits_string\": args.splits_string,\n+ \"seq_length\": args.block_size,\n+ \"micro_batch_size\": args.per_device_train_batch_size,\n+}\n+megatron_dataloader = MegatronLMDummyDataLoader(**megatron_dataloader_config)\n+accelerator.state.megatron_lm_plugin.megatron_dataset_flag = True\n+```\n+\n+b. `megatron_dataloader` is repeated 3 times to get training, validation and test dataloaders\n+as per the `args.splits_string` proportions.\n+ \n+```python\n+model, optimizer, lr_scheduler, train_dataloader, eval_dataloader, _ = accelerator.prepare(\n+ model, optimizer, lr_scheduler, megatron_dataloader, megatron_dataloader, megatron_dataloader\n+)\n+```\n+\n+c. Changes to the training and evaluation loops are required, as the dataloader is only available on tensor parallel rank 0.\n+So, we need to iterate only if the dataloader isn't `None`, else provide an empty dict.\n+As such, we loop using a `while` loop and break when `completed_steps` equals `args.max_train_steps`.\n+This is similar to the Megatron-LM setup wherein the user has to provide `max_train_steps` when using Megatron-LM indexed datasets.\n+This displays how flexible and extensible πŸ€— Accelerate is.\n+\n+```python\n+while completed_steps < args.max_train_steps:\n+ model.train()\n+ batch = next(train_dataloader) if train_dataloader is not None else {}\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ ...\n+\n+ if completed_steps % eval_interval == 0:\n+ eval_completed_steps = 0\n+ losses = []\n+ while eval_completed_steps < eval_iters:\n+ model.eval()\n+ with torch.no_grad():\n+ batch = next(eval_dataloader) if eval_dataloader is not None else {}\n+ outputs = model(**batch)\n+```\n+\n+ \n+## Utility for Checkpoint reshaping and interoperability\n+\n+1. The scripts for these are present in the πŸ€— Transformers library under the respective models. \n+Currently, it is available for the GPT model: [checkpoint_reshaping_and_interoperability.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py)\n+\n+2. Below is an example of converting a checkpoint from Megatron-LM to a universal πŸ€— Transformers sharded checkpoint.\n+```bash\n+python checkpoint_reshaping_and_interoperability.py \\\n+--convert_checkpoint_from_megatron_to_transformers \\\n+--load_path \"gpt/iter_0005000\" \\\n+--save_path \"gpt/trfs_checkpoint\" \\\n+--max_shard_size \"200MB\" \\\n+--tokenizer_name \"gpt2\" \\\n+--print-checkpoint-structure\n+```\n+\n+3. Conversion of a checkpoint from πŸ€— Transformers to Megatron-LM with `tp_size=2`, `pp_size=2` and `dp_size=2`.\n+```bash\n+python checkpoint_utils/megatron_gpt2/checkpoint_reshaping_and_interoperability.py \\\n+--load_path \"gpt/trfs_checkpoint\" \\\n+--save_path \"gpt/megatron_lm_checkpoint\" \\\n+--target_tensor_model_parallel_size 2 \\\n+--target_pipeline_model_parallel_size 2 \\\n+--target_data_parallel_size 2 \\\n+--target_params_dtype \"bf16\" \\\n+--make_vocab_size_divisible_by 128 \\\n+--use_distributed_optimizer \\\n+--print-checkpoint-structure\n+```\n+\n+## Caveats\n+\n+1. Supports Transformers GPT2, Megatron-BERT and T5 models.\n+This covers Decoder only, Encoder only and Encoder-Decoder model classes.\n+\n+2. 
Only the loss is returned from the model forward pass, as \n+there is quite a complex interplay of pipeline, tensor and data parallelism behind the scenes.\n+The `model(**batch_data)` call returns loss(es) averaged across the data parallel ranks.\n+This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and\n+you can easily compute the `perplexity` using the loss. \n+\n+3. The main process is the last rank as the losses are available in the last stage of the pipeline.\n+`accelerator.is_main_process` and `accelerator.is_local_main_process` return `True` for the last rank when using \n+Megatron-LM integration.\n+\n+4. In the `accelerator.prepare` call, a Megatron-LM model corresponding to a given Transformers model is created\n+with random weights. Please use `accelerator.load_state` to load the Megatron-LM checkpoint with matching TP, PP and DP partitions.\n+\n+5. Currently, checkpoint reshaping and interoperability support is only available for GPT. \n+Soon it will be extended to BERT and T5.\n+\n+6. `gradient_accumulation_steps` needs to be 1. When using Megatron-LM, micro-batches in the pipeline parallelism \n+setting are synonymous with gradient accumulation. \n+\n+7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints.\n+\n+8. Below is the mapping from Megatron-LM model architectures to the equivalent πŸ€— transformers model architectures.\n+Only these πŸ€— transformers model architectures are supported.\n+\n+a. Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) : \n+πŸ€— transformers models with `megatron-bert` in config's model type, e.g., \n+[MegatronBERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)\n+ \n+b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py) : \n+πŸ€— transformers models with `gpt2` in config's model type, e.g., \n+[OpenAI GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)\n+ \n+c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) : \n+πŸ€— transformers models with `t5` in config's model type, e.g., \n+[T5](https://huggingface.co/docs/transformers/model_doc/t5) and \n+[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)\n\\ No newline at end of file\ndiff --git a/examples/by_feature/megatron_lm_gpt_pretraining.py b/examples/by_feature/megatron_lm_gpt_pretraining.py\nnew file mode 100644\nindex 000000000..68a425e4d\n--- /dev/null\n+++ b/examples/by_feature/megatron_lm_gpt_pretraining.py\n@@ -0,0 +1,699 @@\n+#!/usr/bin/env python\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\n+on a text file or a dataset without using HuggingFace Trainer.\n+\n+Here is the full list of checkpoints on the hub that can be fine-tuned by this script:\n+https://huggingface.co/models?filter=text-generation\n+\"\"\"\n+# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.\n+\n+import argparse\n+import json\n+import logging\n+import math\n+import os\n+import random\n+from itertools import chain\n+from pathlib import Path\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+import datasets\n+import transformers\n+from accelerate import Accelerator, DistributedType\n+from accelerate.logging import get_logger\n+from accelerate.utils import MegatronLMDummyScheduler, set_seed\n+from datasets import load_dataset\n+from huggingface_hub import Repository\n+from tqdm.auto import tqdm\n+from transformers import (\n+ CONFIG_MAPPING,\n+ MODEL_MAPPING,\n+ AutoConfig,\n+ AutoModelForCausalLM,\n+ AutoTokenizer,\n+ SchedulerType,\n+ default_data_collator,\n+ get_scheduler,\n+)\n+from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry\n+from transformers.utils.versions import require_version\n+\n+\n+# Will error if the minimal version of Transformers is not installed. 
Remove at your own risks.\n+check_min_version(\"4.23.0.dev0\")\n+\n+logger = get_logger(__name__)\n+\n+require_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\")\n+\n+MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\n+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n+\n+\n+def parse_args():\n+ parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a causal language modeling task\")\n+ parser.add_argument(\n+ \"--dataset_name\",\n+ type=str,\n+ default=None,\n+ help=\"The name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--dataset_config_name\",\n+ type=str,\n+ default=None,\n+ help=\"The configuration name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_split_percentage\",\n+ default=5,\n+ help=\"The percentage of the train set used as validation set in case there's no validation split\",\n+ )\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=False,\n+ )\n+ parser.add_argument(\n+ \"--config_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained config name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--tokenizer_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained tokenizer name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--use_slow_tokenizer\",\n+ action=\"store_true\",\n+ help=\"If passed, will use a slow tokenizer (not backed by the πŸ€— Tokenizers library).\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_train_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the training dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_eval_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the evaluation dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--learning_rate\",\n+ type=float,\n+ default=5e-5,\n+ help=\"Initial learning rate (after the potential warmup period) to use.\",\n+ )\n+ parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n+ parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n+ parser.add_argument(\n+ \"--max_train_steps\",\n+ type=int,\n+ default=None,\n+ help=\"Total number of training steps to perform. 
If provided, overrides num_train_epochs.\",\n+ )\n+ parser.add_argument(\n+ \"--gradient_accumulation_steps\",\n+ type=int,\n+ default=1,\n+ help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n+ )\n+ parser.add_argument(\n+ \"--lr_scheduler_type\",\n+ type=SchedulerType,\n+ default=\"linear\",\n+ help=\"The scheduler type to use.\",\n+ choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n+ )\n+ parser.add_argument(\n+ \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n+ )\n+ parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n+ parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n+ parser.add_argument(\n+ \"--model_type\",\n+ type=str,\n+ default=None,\n+ help=\"Model type to use if training from scratch.\",\n+ choices=MODEL_TYPES,\n+ )\n+ parser.add_argument(\n+ \"--block_size\",\n+ type=int,\n+ default=None,\n+ help=(\n+ \"Optional input sequence length after tokenization. The training dataset will be truncated in block of\"\n+ \" this size for training. Default to the model max input length for single sentence inputs (take into\"\n+ \" account special tokens).\"\n+ ),\n+ )\n+ parser.add_argument(\n+ \"--preprocessing_num_workers\",\n+ type=int,\n+ default=None,\n+ help=\"The number of processes to use for the preprocessing.\",\n+ )\n+ parser.add_argument(\n+ \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n+ )\n+ parser.add_argument(\n+ \"--no_keep_linebreaks\", action=\"store_true\", help=\"Do not keep line breaks when using TXT files.\"\n+ )\n+ parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\n+ parser.add_argument(\n+ \"--hub_model_id\", type=str, help=\"The name of the repository to keep in sync with the local `output_dir`.\"\n+ )\n+ parser.add_argument(\"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ action=\"store_true\",\n+ help=\"Whether to enable experiment trackers for logging.\",\n+ )\n+ parser.add_argument(\n+ \"--report_to\",\n+ type=str,\n+ default=\"all\",\n+ help=(\n+ 'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`,'\n+ ' `\"wandb\"` and `\"comet_ml\"`. 
Use `\"all\"` (default) to report to all integrations.'\n+ \"Only applicable when `--with_tracking` is passed.\"\n+ ),\n+ )\n+ args = parser.parse_args()\n+\n+ # Sanity checks\n+ if args.dataset_name is None and args.train_file is None and args.validation_file is None:\n+ raise ValueError(\"Need either a dataset name or a training/validation file.\")\n+ else:\n+ if args.train_file is not None:\n+ extension = args.train_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, json or txt file.\"\n+ if args.validation_file is not None:\n+ extension = args.validation_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, json or txt file.\"\n+\n+ if args.push_to_hub:\n+ assert args.output_dir is not None, \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n+\n+ return args\n+\n+\n+def main():\n+ args = parse_args()\n+\n+ # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The\n+ # information sent is the one passed as arguments along with your Python/PyTorch versions.\n+ send_example_telemetry(\"run_clm_no_trainer\", args)\n+\n+ # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.\n+ # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers\n+ # in the environment\n+ accelerator_log_kwargs = {}\n+\n+ if args.with_tracking:\n+ accelerator_log_kwargs[\"log_with\"] = args.report_to\n+ accelerator_log_kwargs[\"logging_dir\"] = args.output_dir\n+\n+ accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)\n+\n+ # Make one log on every process with the configuration for debugging.\n+ logging.basicConfig(\n+ format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n+ datefmt=\"%m/%d/%Y %H:%M:%S\",\n+ level=logging.INFO,\n+ )\n+ logger.info(accelerator.state, main_process_only=False)\n+ if accelerator.is_local_main_process:\n+ datasets.utils.logging.set_verbosity_warning()\n+ transformers.utils.logging.set_verbosity_info()\n+ else:\n+ datasets.utils.logging.set_verbosity_error()\n+ transformers.utils.logging.set_verbosity_error()\n+\n+ # If passed along, set the training seed now.\n+ if args.seed is not None:\n+ set_seed(args.seed)\n+\n+ # Handle the repository creation\n+ if accelerator.is_main_process:\n+ if args.push_to_hub:\n+ if args.hub_model_id is None:\n+ repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)\n+ else:\n+ repo_name = args.hub_model_id\n+ repo = Repository(args.output_dir, clone_from=repo_name)\n+\n+ with open(os.path.join(args.output_dir, \".gitignore\"), \"w+\") as gitignore:\n+ if \"step_*\" not in gitignore:\n+ gitignore.write(\"step_*\\n\")\n+ if \"epoch_*\" not in gitignore:\n+ gitignore.write(\"epoch_*\\n\")\n+ elif args.output_dir is not None:\n+ os.makedirs(args.output_dir, exist_ok=True)\n+ accelerator.wait_for_everyone()\n+\n+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n+ # (the dataset will be downloaded automatically from the datasets Hub).\n+ #\n+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n+ # 'text' is found. 
You can easily tweak this behavior (see below).\n+ #\n+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n+ # download the dataset.\n+ if args.dataset_name is not None:\n+ # Downloading and loading a dataset from the hub.\n+ raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)\n+ if \"validation\" not in raw_datasets.keys():\n+ raw_datasets[\"validation\"] = load_dataset(\n+ args.dataset_name,\n+ args.dataset_config_name,\n+ split=f\"train[:{args.validation_split_percentage}%]\",\n+ )\n+ raw_datasets[\"train\"] = load_dataset(\n+ args.dataset_name,\n+ args.dataset_config_name,\n+ split=f\"train[{args.validation_split_percentage}%:]\",\n+ )\n+ else:\n+ data_files = {}\n+ dataset_args = {}\n+ if args.train_file is not None:\n+ data_files[\"train\"] = args.train_file\n+ if args.validation_file is not None:\n+ data_files[\"validation\"] = args.validation_file\n+ extension = args.train_file.split(\".\")[-1]\n+ if extension == \"txt\":\n+ extension = \"text\"\n+ dataset_args[\"keep_linebreaks\"] = not args.no_keep_linebreaks\n+ raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)\n+ # If no validation data is there, validation_split_percentage will be used to divide the dataset.\n+ if \"validation\" not in raw_datasets.keys():\n+ raw_datasets[\"validation\"] = load_dataset(\n+ extension,\n+ data_files=data_files,\n+ split=f\"train[:{args.validation_split_percentage}%]\",\n+ **dataset_args,\n+ )\n+ raw_datasets[\"train\"] = load_dataset(\n+ extension,\n+ data_files=data_files,\n+ split=f\"train[{args.validation_split_percentage}%:]\",\n+ **dataset_args,\n+ )\n+\n+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n+ # https://huggingface.co/docs/datasets/loading_datasets.html.\n+\n+ # Load pretrained model and tokenizer\n+ #\n+ # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n+ # download model & vocab.\n+ if args.config_name:\n+ config = AutoConfig.from_pretrained(args.config_name)\n+ elif args.model_name_or_path:\n+ config = AutoConfig.from_pretrained(args.model_name_or_path)\n+ else:\n+ config = CONFIG_MAPPING[args.model_type]()\n+ logger.warning(\"You are instantiating a new config instance from scratch.\")\n+\n+ if args.tokenizer_name:\n+ tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)\n+ elif args.model_name_or_path:\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)\n+ else:\n+ raise ValueError(\n+ \"You are instantiating a new tokenizer from scratch. 
This is not supported by this script.\"\n+ \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n+ )\n+\n+ if args.model_name_or_path:\n+ model = AutoModelForCausalLM.from_pretrained(\n+ args.model_name_or_path,\n+ from_tf=bool(\".ckpt\" in args.model_name_or_path),\n+ config=config,\n+ )\n+ else:\n+ logger.info(\"Training new model from scratch\")\n+ model = AutoModelForCausalLM.from_config(config)\n+\n+ model.resize_token_embeddings(len(tokenizer))\n+\n+ # Preprocessing the datasets.\n+ # First we tokenize all the texts.\n+ column_names = raw_datasets[\"train\"].column_names\n+ text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n+\n+ def tokenize_function(examples):\n+ return tokenizer(examples[text_column_name])\n+\n+ with accelerator.main_process_first():\n+ tokenized_datasets = raw_datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ num_proc=args.preprocessing_num_workers,\n+ remove_columns=column_names,\n+ load_from_cache_file=not args.overwrite_cache,\n+ desc=\"Running tokenizer on dataset\",\n+ )\n+\n+ if args.block_size is None:\n+ block_size = tokenizer.model_max_length\n+ if block_size > 1024:\n+ logger.warning(\n+ f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n+ \"Picking 1024 instead. You can change that default value by passing --block_size xxx.\"\n+ )\n+ block_size = 1024\n+ else:\n+ if args.block_size > tokenizer.model_max_length:\n+ logger.warning(\n+ f\"The block_size passed ({args.block_size}) is larger than the maximum length for the model\"\n+ f\"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.\"\n+ )\n+ block_size = min(args.block_size, tokenizer.model_max_length)\n+\n+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.\n+ def group_texts(examples):\n+ # Concatenate all texts.\n+ concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\n+ total_length = len(concatenated_examples[list(examples.keys())[0]])\n+ # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can\n+ # customize this part to your needs.\n+ if total_length >= block_size:\n+ total_length = (total_length // block_size) * block_size\n+ # Split by chunks of max_len.\n+ result = {\n+ k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\n+ for k, t in concatenated_examples.items()\n+ }\n+ result[\"labels\"] = result[\"input_ids\"].copy()\n+ return result\n+\n+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder\n+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower\n+ # to preprocess.\n+ #\n+ # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information:\n+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map\n+\n+ with accelerator.main_process_first():\n+ lm_datasets = tokenized_datasets.map(\n+ group_texts,\n+ batched=True,\n+ num_proc=args.preprocessing_num_workers,\n+ load_from_cache_file=not args.overwrite_cache,\n+ desc=f\"Grouping texts in chunks of {block_size}\",\n+ )\n+\n+ train_dataset = lm_datasets[\"train\"]\n+ eval_dataset = lm_datasets[\"validation\"]\n+\n+ # Log a few random samples from the training set:\n+ for index in random.sample(range(len(train_dataset)), 3):\n+ logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n+\n+ # DataLoaders creation:\n+ train_dataloader = DataLoader(\n+ train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size\n+ )\n+\n+ # Optimizer\n+ # Split weights in two groups, one with weight decay and the other not.\n+ no_decay = [\"bias\", \"layer_norm.weight\"]\n+ optimizer_grouped_parameters = [\n+ {\n+ \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n+ \"weight_decay\": args.weight_decay,\n+ },\n+ {\n+ \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n+ \"weight_decay\": 0.0,\n+ },\n+ ]\n+ optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n+\n+ # Scheduler and math around the number of training steps.\n+ overrode_max_train_steps = False\n+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n+ if args.max_train_steps is None:\n+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n+ overrode_max_train_steps = True\n+\n+ # New Code\n+ # For Megatron-LM, we need to use `MegatronLMDummyScheduler` instead of regular schedulers\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ lr_scheduler = MegatronLMDummyScheduler(\n+ optimizer=optimizer,\n+ total_num_steps=args.max_train_steps,\n+ warmup_num_steps=args.num_warmup_steps,\n+ )\n+ else:\n+ lr_scheduler = get_scheduler(\n+ name=args.lr_scheduler_type,\n+ optimizer=optimizer,\n+ num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,\n+ num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything with our `accelerator`.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ model.tie_weights()\n+\n+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.\n+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n+ if overrode_max_train_steps:\n+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n+ # Afterwards we recalculate our number of training epochs\n+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n+\n+ # Figure out how many steps we should save the Accelerator states\n+ checkpointing_steps = args.checkpointing_steps\n+ if checkpointing_steps is not None 
and checkpointing_steps.isdigit():\n+ checkpointing_steps = int(checkpointing_steps)\n+\n+ # We need to initialize the trackers we use, and also store our configuration.\n+ # The trackers initializes automatically on the main process.\n+ if args.with_tracking:\n+ experiment_config = vars(args)\n+ # TensorBoard cannot log Enums, need the raw value\n+ experiment_config[\"lr_scheduler_type\"] = experiment_config[\"lr_scheduler_type\"].value\n+ accelerator.init_trackers(\"clm_no_trainer\", experiment_config)\n+\n+ # Train!\n+ # New Code\n+ # For Megatron-LM, we need to get `global_batch_size` from megatron_lm_plugin\n+ # as it handles the specifics related to data parallelism, tensor model parallelism and pipeline parallelism\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size\n+ else:\n+ total_batch_size = (\n+ args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n+ )\n+\n+ logger.info(\"***** Running training *****\")\n+ logger.info(f\" Num examples = {len(train_dataset)}\")\n+ logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n+ logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n+ logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\n+ logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n+ logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n+ # Only show the progress bar once on each machine.\n+ progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n+ completed_steps = 0\n+ starting_epoch = 0\n+\n+ # Potentially load in the weights and states from a previous save\n+ if args.resume_from_checkpoint:\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ path = os.path.basename(args.resume_from_checkpoint)\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ # Extract `epoch_{i}` or `step_{i}`\n+ training_difference = os.path.splitext(path)[0]\n+\n+ if \"epoch\" in training_difference:\n+ starting_epoch = int(training_difference.replace(\"epoch_\", \"\")) + 1\n+ resume_step = None\n+ else:\n+ # need to multiply `gradient_accumulation_steps` to reflect real steps\n+ resume_step = int(training_difference.replace(\"step_\", \"\")) * args.gradient_accumulation_steps\n+ starting_epoch = resume_step // len(train_dataloader)\n+ resume_step -= starting_epoch * len(train_dataloader)\n+\n+ # update the progress_bar if load from checkpoint\n+ progress_bar.update(starting_epoch * num_update_steps_per_epoch)\n+ completed_steps = starting_epoch * num_update_steps_per_epoch\n+\n+ for epoch in range(starting_epoch, args.num_train_epochs):\n+ model.train()\n+ if args.with_tracking:\n+ total_loss = 0\n+ for step, batch in enumerate(train_dataloader):\n+ # We need to skip steps until we reach the resumed step\n+ if args.resume_from_checkpoint and epoch == starting_epoch:\n+ if resume_step is not None and step < resume_step:\n+ if step % args.gradient_accumulation_steps == 0:\n+ progress_bar.update(1)\n+ completed_steps += 1\n+ continue\n+\n+ 
with accelerator.accumulate(model):\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ # We keep track of the loss at each epoch\n+ if args.with_tracking:\n+ total_loss += loss.detach().float()\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ # Checks if the accelerator has performed an optimization step behind the scenes\n+ if accelerator.sync_gradients:\n+ progress_bar.update(1)\n+ completed_steps += 1\n+\n+ if isinstance(checkpointing_steps, int):\n+ if completed_steps % checkpointing_steps == 0:\n+ output_dir = f\"step_{completed_steps }\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+ if completed_steps >= args.max_train_steps:\n+ break\n+\n+ model.eval()\n+ losses = []\n+ for step, batch in enumerate(eval_dataloader):\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+\n+ loss = outputs.loss\n+ # New Code\n+ # For Megatron-LM, the losses are already averaged across the data parallel group\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ losses.append(loss)\n+ else:\n+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))\n+ try:\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ losses = torch.tensor(losses)\n+ else:\n+ losses = torch.cat(losses)\n+ eval_loss = torch.mean(losses)\n+ perplexity = math.exp(eval_loss)\n+ except OverflowError:\n+ perplexity = float(\"inf\")\n+\n+ logger.info(f\"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}\")\n+\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"perplexity\": perplexity,\n+ \"eval_loss\": eval_loss,\n+ \"train_loss\": total_loss.item() / len(train_dataloader),\n+ \"epoch\": epoch,\n+ \"step\": completed_steps,\n+ },\n+ step=completed_steps,\n+ )\n+\n+ if args.push_to_hub and epoch < args.num_train_epochs - 1:\n+ accelerator.wait_for_everyone()\n+ unwrapped_model = accelerator.unwrap_model(model)\n+ unwrapped_model.save_pretrained(\n+ args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save\n+ )\n+ if accelerator.is_main_process:\n+ tokenizer.save_pretrained(args.output_dir)\n+ repo.push_to_hub(\n+ commit_message=f\"Training in progress epoch {epoch}\", blocking=False, auto_lfs_prune=True\n+ )\n+\n+ if args.checkpointing_steps == \"epoch\":\n+ output_dir = f\"epoch_{epoch}\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+\n+ if args.with_tracking:\n+ accelerator.end_training()\n+\n+ if args.output_dir is not None:\n+ accelerator.wait_for_everyone()\n+ # New Code\n+ # For Megatron-LM, we need to save the model using `accelerator.save_state`\n+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:\n+ accelerator.save_state(args.output_dir)\n+ else:\n+ unwrapped_model = accelerator.unwrap_model(model)\n+ unwrapped_model.save_pretrained(\n+ args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save\n+ )\n+ if accelerator.is_main_process:\n+ tokenizer.save_pretrained(args.output_dir)\n+ if args.push_to_hub:\n+ repo.push_to_hub(commit_message=\"End of training\", auto_lfs_prune=True)\n+\n+ with open(os.path.join(args.output_dir, \"all_results.json\"), \"w\") as f:\n+ json.dump({\"perplexity\": perplexity}, f)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 
2b6558944..e40797b39 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -41,6 +41,7 @@\n InitProcessGroupKwargs,\n KwargsHandler,\n LoggerType,\n+ MegatronLMPlugin,\n PrecisionType,\n RNGType,\n compare_versions,\n@@ -50,6 +51,7 @@\n get_pretty_name,\n is_bf16_available,\n is_deepspeed_available,\n+ is_megatron_lm_available,\n is_torch_version,\n is_tpu_available,\n pad_across_processes,\n@@ -71,6 +73,21 @@\n DummyScheduler,\n )\n \n+if is_megatron_lm_available():\n+ from .utils import (\n+ MegatronEngine,\n+ MegatronLMDummyDataLoader,\n+ MegatronLMDummyScheduler,\n+ MegatronLMOptimizerWrapper,\n+ MegatronLMSchedulerWrapper,\n+ megatron_lm_initialize,\n+ megatron_lm_prepare_data_loader,\n+ megatron_lm_prepare_model,\n+ megatron_lm_prepare_optimizer,\n+ megatron_lm_prepare_scheduler,\n+ )\n+\n+\n if is_tpu_available(check_device=False):\n import torch_xla.distributed.xla_multiprocessing as xmp\n \n@@ -107,6 +124,9 @@ class Accelerator:\n fsdp_plugin (`FullyShardedDataParallelPlugin`, *optional*):\n Tweak your FSDP related args using this argument. This argument is optional and can be configured directly\n using *accelerate config*\n+ megatron_lm_plugin (`MegatronLMPlugin`, *optional*):\n+ Tweak your MegatronLM related args using this argument. This argument is optional and can be configured\n+ directly using *accelerate config*\n rng_types (list of `str` or [`~utils.RNGType`]):\n The list of random number generators to synchronize at the beginning of each iteration in your prepared\n dataloaders. Should be one or several of:\n@@ -166,6 +186,7 @@ def __init__(\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n fsdp_plugin: FullyShardedDataParallelPlugin = None,\n+ megatron_lm_plugin: MegatronLMPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = None,\n@@ -218,6 +239,17 @@ def __init__(\n raise TypeError(\"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\")\n os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\n \n+ if megatron_lm_plugin is None: # init from env variables\n+ megatron_lm_plugin = MegatronLMPlugin() if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\" else None\n+ else:\n+ if not isinstance(megatron_lm_plugin, MegatronLMPlugin):\n+ raise TypeError(\"`megatron_lm_plugin` must be a MegatronLMPlugin object.\")\n+ os.environ[\"USE_MEGATRON_LM\"] = \"true\" # use MegatronLM if plugin is provided\n+\n+ if megatron_lm_plugin:\n+ if not is_megatron_lm_available():\n+ raise ImportError(\"Megatron is not installed. 
please build it from source.\")\n+\n # Kwargs handlers\n self.ddp_handler = None\n self.scaler_handler = None\n@@ -247,6 +279,7 @@ def __init__(\n cpu=cpu,\n deepspeed_plugin=deepspeed_plugin,\n fsdp_plugin=fsdp_plugin,\n+ megatron_lm_plugin=megatron_lm_plugin,\n _from_accelerator=True,\n **kwargs,\n )\n@@ -278,7 +311,7 @@ def __init__(\n self.scaler = None\n self.native_amp = False\n err = \"{mode} mixed precision requires {requirement}\"\n- if self.state.mixed_precision == \"fp16\":\n+ if self.state.mixed_precision == \"fp16\" and self.distributed_type != DistributedType.MEGATRON_LM:\n self.native_amp = True\n if not torch.cuda.is_available() and not parse_flag_from_env(\"USE_MPS_DEVICE\"):\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\n@@ -289,7 +322,11 @@ def __init__(\n self.scaler = ShardedGradScaler(**kwargs)\n else:\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n- elif self.state.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.FSDP:\n+ elif (\n+ self.state.mixed_precision == \"bf16\"\n+ and self.distributed_type != DistributedType.FSDP\n+ and self.distributed_type != DistributedType.MEGATRON_LM\n+ ):\n self.native_amp = is_bf16_available(True)\n if mixed_precision == \"bf16\" and not self.native_amp and not is_tpu_available():\n raise ValueError(err.format(mode=\"bf16\", requirement=\"PyTorch >= 1.10 and a supported device.\"))\n@@ -344,17 +381,27 @@ def device(self):\n @property\n def is_main_process(self):\n \"\"\"True for one process only.\"\"\"\n- return self.process_index == 0\n+ return (\n+ self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process\n+ )\n \n @property\n def is_local_main_process(self):\n \"\"\"True for one process per server.\"\"\"\n- return self.local_process_index == 0\n+ return (\n+ self.local_process_index == 0\n+ if self.distributed_type != DistributedType.MEGATRON_LM\n+ else self.is_last_process\n+ )\n \n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n \n+ @property\n+ def is_last_process(self):\n+ return self.process_index == self.num_processes - 1\n+\n @property\n def mixed_precision(self):\n if self.distributed_type == DistributedType.DEEPSPEED:\n@@ -393,6 +440,18 @@ def wrapper(self, *args, **kwargs):\n \n return wrapper\n \n+ def on_last_process(func):\n+ \"\"\"\n+ A decorator that will run the decorated function on the last process only.\n+ \"\"\"\n+\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.is_last_process or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n def on_process(process_idx):\n \"\"\"\n A decorator that will run the decorated function on a given process index only.\n@@ -628,8 +687,8 @@ def prepare(self, *args, device_placement=None):\n \"\"\"\n if device_placement is None:\n device_placement = [None for _ in args]\n- elif self.distributed_type == DistributedType.DEEPSPEED:\n- raise ValueError(\"You can't customize device placements with DeepSpeed.\")\n+ elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM):\n+ raise ValueError(\"You can't customize device placements with DeepSpeed or Megatron-LM.\")\n elif len(device_placement) != len(args):\n raise ValueError(\n f\"`device_placement` should be a list with {len(args)} elements (the number of objects passed).\"\n@@ -677,6 +736,8 @@ def prepare(self, *args, device_placement=None):\n \n if self.distributed_type == DistributedType.DEEPSPEED:\n result = 
self._prepare_deepspeed(*args)\n+ elif self.distributed_type == DistributedType.MEGATRON_LM:\n+ result = self._prepare_megatron_lm(*args)\n else:\n result = tuple(\n self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)\n@@ -923,6 +984,107 @@ def _prepare_deepspeed(self, *args):\n )\n return tuple(result)\n \n+ def _prepare_megatron_lm(self, *args):\n+ megatron_lm_plugin = self.state.megatron_lm_plugin\n+ if not megatron_lm_plugin.megatron_dataset_flag:\n+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ if len(batch_sizes) == 0:\n+ raise ValueError(\n+ \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM.\"\n+ )\n+\n+ micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes)\n+ if len(batch_sizes) > 1:\n+ logger.info(\n+ \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\n+ f\"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size}).\"\n+ )\n+ else:\n+ for obj in args:\n+ if isinstance(obj, MegatronLMDummyDataLoader):\n+ micro_batch_size = obj.dataset_args[\"micro_batch_size\"]\n+ break\n+\n+ dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)\n+ megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)\n+\n+ model = None\n+ optimizer = None\n+ scheduler = None\n+ is_dummy_scheduler = False\n+ batch_data = None\n+ for obj in args:\n+ if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None:\n+ batch_data = next(iter(obj))\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ elif isinstance(obj, (torch.optim.Optimizer)):\n+ optimizer = obj\n+ elif isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, MegatronLMDummyScheduler)):\n+ scheduler = obj\n+\n+ if model is not None:\n+ megatron_lm_plugin.set_network_size_args(model, batch_data)\n+ if optimizer is not None:\n+ megatron_lm_plugin.set_optimizer_type(optimizer)\n+ if scheduler is not None:\n+ is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler)\n+ if not is_dummy_scheduler:\n+ raise ValueError(\n+ \"You can't use a custom scheduler with Megatron-LM. 
Please use the `accelerate.utils.MegatronLMDummyScheduler` instead.\"\n+ )\n+ megatron_lm_plugin.set_scheduler_args(scheduler)\n+\n+ # initialize megatron-lm\n+ megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args)\n+ counter = 0\n+ result = []\n+ for obj in args:\n+ if isinstance(obj, torch.utils.data.DataLoader):\n+ result.append(megatron_lm_prepare_data_loader(self, obj))\n+ counter += 1\n+ elif isinstance(obj, MegatronLMDummyDataLoader):\n+ if counter == 0:\n+ obj.set_megatron_data_args()\n+ dataloaders = megatron_lm_prepare_data_loader(self, obj)\n+ result.append(dataloaders[counter])\n+ counter += 1\n+ else:\n+ result.append(obj)\n+\n+ if model is not None:\n+ model = megatron_lm_prepare_model(self)\n+ if optimizer is not None:\n+ optimizer = megatron_lm_prepare_optimizer(self, model)\n+ if scheduler is not None:\n+ scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler)\n+\n+ if model is not None:\n+ model = MegatronEngine(self, model, optimizer, scheduler)\n+ if optimizer is not None:\n+ optimizer = MegatronLMOptimizerWrapper(optimizer)\n+ if scheduler is not None:\n+ scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer)\n+\n+ for i in range(len(result)):\n+ if isinstance(result[i], torch.nn.Module):\n+ result[i] = model\n+ elif isinstance(result[i], torch.optim.Optimizer):\n+ result[i] = optimizer\n+ elif isinstance(result[i], MegatronLMDummyScheduler):\n+ result[i] = scheduler\n+ if model is not None:\n+ self._models.append(model)\n+ if optimizer is not None:\n+ self._optimizers.append(optimizer)\n+ if scheduler is not None:\n+ self._schedulers.append(scheduler)\n+ if len(self._models) > 1:\n+ raise AssertionError(\n+ \"You can't use same `Accelerator()` instance with multiple models when using Megatron-LM\"\n+ )\n+ return tuple(result)\n+\n def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_placement=None):\n \"\"\"\n Prepares a PyTorch DataLoader for training in any distributed setup. 
It is recommended to use\n@@ -1001,6 +1163,8 @@ def backward(self, loss, **kwargs):\n loss = loss / self.gradient_accumulation_steps\n if self.distributed_type == DistributedType.DEEPSPEED:\n self.deepspeed_engine_wrapped.backward(loss, **kwargs)\n+ elif self.distributed_type == DistributedType.MEGATRON_LM:\n+ return\n elif self.scaler is not None:\n self.scaler.scale(loss).backward(**kwargs)\n else:\n@@ -1312,6 +1476,10 @@ def save_state(self, output_dir: str):\n ckpt_id = f\"{MODEL_NAME}\" if i == 0 else f\"{MODEL_NAME}_{i}\"\n model.save_checkpoint(output_dir, ckpt_id)\n logger.info(f\"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}\")\n+ elif self.distributed_type == DistributedType.MEGATRON_LM:\n+ logger.info(\"Saving Megatron-LM Model, Optimizer and Scheduler\")\n+ model.save_checkpoint(output_dir)\n+ logger.info(f\"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}\")\n else:\n weights.append(self.get_state_dict(model, unwrap=False))\n \n@@ -1322,7 +1490,7 @@ def save_state(self, output_dir: str):\n logger.info(\"Saving FSDP Optimizer\")\n self.state.fsdp_plugin.save_optimizer(self, opt, self._models[i], output_dir, i)\n logger.info(f\"FSDP Optimizer saved to output dir {output_dir}\")\n- elif self.distributed_type != DistributedType.DEEPSPEED:\n+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:\n optimizers = self._optimizers\n \n # Save the lr schedulers taking care of DeepSpeed nuances\n@@ -1332,7 +1500,7 @@ def save_state(self, output_dir: str):\n if isinstance(scheduler, DeepSpeedSchedulerWrapper):\n continue\n schedulers.append(scheduler)\n- else:\n+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:\n schedulers = self._schedulers\n \n save_location = save_accelerator_state(\n@@ -1374,6 +1542,10 @@ def load_state(self, input_dir: str):\n ckpt_id = f\"{MODEL_NAME}\" if i == 0 else f\"{MODEL_NAME}_{i}\"\n model.load_checkpoint(input_dir, ckpt_id)\n logger.info(f\"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}\")\n+ elif self.distributed_type == DistributedType.MEGATRON_LM:\n+ logger.info(\"Loading Megatron-LM Model, Optimizer and Scheduler\")\n+ model.load_checkpoint(input_dir)\n+ logger.info(f\"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}\")\n else:\n models.append(model)\n \n@@ -1384,7 +1556,7 @@ def load_state(self, input_dir: str):\n logger.info(\"Loading FSDP Optimizer\")\n self.state.fsdp_plugin.load_optimizer(self, opt, self._models[i], input_dir, i)\n logger.info(f\"FSDP Optimizer loaded from input dir {input_dir}\")\n- elif self.distributed_type != DistributedType.DEEPSPEED:\n+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:\n optimizers = self._optimizers\n \n # Load the lr schedulers taking care of DeepSpeed nuances\n@@ -1394,7 +1566,7 @@ def load_state(self, input_dir: str):\n if isinstance(scheduler, DeepSpeedSchedulerWrapper):\n continue\n schedulers.append(scheduler)\n- else:\n+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:\n schedulers = self._schedulers\n \n load_accelerator_state(input_dir, models, optimizers, schedulers, self.state.process_index, self.scaler)\ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex d5e816aab..00dfce73e 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -155,15 +155,18 @@ def 
load_accelerator_state(input_dir, models, optimizers, schedulers, process_in\n logger.info(\"GradScaler state loaded successfully\")\n \n # Random states\n- states = torch.load(os.path.join(input_dir, f\"{RNG_STATE_NAME}_{process_index}.pkl\"))\n- random.setstate(states[\"random_state\"])\n- np.random.set_state(states[\"numpy_random_seed\"])\n- torch.set_rng_state(states[\"torch_manual_seed\"])\n- torch.cuda.set_rng_state_all(states[\"torch_cuda_manual_seed\"])\n- # ^^ safe to call this function even if cuda is not available\n- if is_tpu_available():\n- xm.set_rng_state(states[\"xm_seed\"])\n- logger.info(\"All random states loaded successfully\")\n+ try:\n+ states = torch.load(os.path.join(input_dir, f\"{RNG_STATE_NAME}_{process_index}.pkl\"))\n+ random.setstate(states[\"random_state\"])\n+ np.random.set_state(states[\"numpy_random_seed\"])\n+ torch.set_rng_state(states[\"torch_manual_seed\"])\n+ torch.cuda.set_rng_state_all(states[\"torch_cuda_manual_seed\"])\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ xm.set_rng_state(states[\"xm_seed\"])\n+ logger.info(\"All random states loaded successfully\")\n+ except:\n+ logger.info(\"Could not load random states\")\n \n \n def save_custom_state(obj, path, index: int = 0):\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex e86a0abff..0fa3ceab2 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -275,6 +275,67 @@ def get_cluster_input():\n default=\"FULL_STATE_DICT\",\n )\n \n+ megatron_lm_config = {}\n+ if distributed_type in [DistributedType.MULTI_GPU]:\n+ use_megatron_lm = _ask_field(\n+ \"Do you want to use Megatron-LM ? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_megatron_lm:\n+ distributed_type = DistributedType.MEGATRON_LM\n+ if distributed_type == DistributedType.MEGATRON_LM:\n+ prefix = \"megatron_lm_\"\n+ megatron_lm_config[prefix + \"tp_degree\"] = _ask_field(\n+ \"What is the Tensor Parallelism degree/size? [1]:\",\n+ lambda x: int(x),\n+ default=1,\n+ error_message=\"Please enter an integer.\",\n+ )\n+ if megatron_lm_config[prefix + \"tp_degree\"] > 1:\n+ megatron_lm_config[prefix + \"sequence_parallelism\"] = _ask_field(\n+ \"Do you want to enable Sequence Parallelism? [YES/no]: \",\n+ _convert_yes_no_to_bool,\n+ default=True,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+\n+ megatron_lm_config[prefix + \"pp_degree\"] = _ask_field(\n+ \"What is the Pipeline Parallelism degree/size? [1]:\",\n+ lambda x: int(x),\n+ default=1,\n+ error_message=\"Please enter an integer.\",\n+ )\n+ if megatron_lm_config[prefix + \"pp_degree\"] > 1:\n+ megatron_lm_config[prefix + \"num_micro_batches\"] = _ask_field(\n+ \"What is the number of micro-batches? [1]:\",\n+ lambda x: int(x),\n+ default=1,\n+ error_message=\"Please enter an integer.\",\n+ )\n+\n+ megatron_lm_config[prefix + \"recompute_activations\"] = _ask_field(\n+ \"Do you want to enable selective activation recomputation? [YES/no]: \",\n+ _convert_yes_no_to_bool,\n+ default=True,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+\n+ megatron_lm_config[prefix + \"use_distributed_optimizer\"] = _ask_field(\n+ \"Do you want to use distributed optimizer \"\n+ \"which shards optimizer state and gradients across data pralellel ranks? 
[YES/no]: \",\n+ _convert_yes_no_to_bool,\n+ default=True,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+\n+ megatron_lm_config[prefix + \"gradient_clipping\"] = _ask_field(\n+ \"What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: \",\n+ lambda x: float(x),\n+ default=1.0,\n+ )\n+\n if distributed_type == DistributedType.TPU:\n main_training_function = _ask_field(\n \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\n@@ -295,7 +356,7 @@ def get_cluster_input():\n default=1,\n error_message=\"Please enter an integer.\",\n )\n- elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED]:\n+ elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:\n num_processes = _ask_field(\n \"How many GPU(s) should be used for distributed training? [1]:\",\n lambda x: int(x),\n@@ -343,6 +404,7 @@ def get_cluster_input():\n main_training_function=main_training_function,\n deepspeed_config=deepspeed_config,\n fsdp_config=fsdp_config,\n+ megatron_lm_config=megatron_lm_config,\n use_cpu=use_cpu,\n rdzv_backend=rdzv_backend,\n same_network=same_network,\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 43faf45ca..159f951dd 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -146,6 +146,8 @@ class ClusterConfig(BaseConfig):\n deepspeed_config: dict = None\n # args for fsdp\n fsdp_config: dict = None\n+ # args for megatron_lm\n+ megatron_lm_config: dict = None\n # args for TPU\n downcast_bf16: bool = False\n \n@@ -154,6 +156,8 @@ def __post_init__(self):\n self.deepspeed_config = {}\n if self.fsdp_config is None:\n self.fsdp_config = {}\n+ if self.megatron_lm_config is None:\n+ self.megatron_lm_config = {}\n return super().__post_init__()\n \n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex f348bb0a6..c297223cb 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -83,6 +83,8 @@ def launch_command_parser(subparsers=None):\n action=\"store_true\",\n help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\n )\n+\n+ # deepspeed args\n parser.add_argument(\n \"--use_deepspeed\",\n default=False,\n@@ -163,6 +165,8 @@ def launch_command_parser(subparsers=None):\n type=str,\n help=\"DeepSpeed multi-node launcher to use.\",\n )\n+\n+ # fsdp args\n parser.add_argument(\n \"--use_fsdp\",\n default=False,\n@@ -236,6 +240,62 @@ def launch_command_parser(subparsers=None):\n type=str,\n help=\"This argument is deprecated. Use `fsdp_transformer_layer_cls_to_wrap` instead.\",\n )\n+\n+ # megatron_lm args\n+ parser.add_argument(\n+ \"--use_megatron_lm\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether to use Megatron-LM.\",\n+ )\n+ parser.add_argument(\n+ \"--megatron_lm_tp_degree\",\n+ type=int,\n+ default=1,\n+ help=\"Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--megatron_lm_pp_degree\",\n+ type=int,\n+ default=1,\n+ help=\"Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--megatron_lm_num_micro_batches\",\n+ type=int,\n+ default=None,\n+ help=\"Megatron-LM's number of micro batches when PP degree > 1. 
(useful only when `use_megatron_lm` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--megatron_lm_sequence_parallelism\",\n+ default=None,\n+ type=str,\n+ help=\"Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. \"\n+ \"(useful only when `use_megatron_lm` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--megatron_lm_recompute_activations\",\n+ default=None,\n+ type=str,\n+ help=\"Decides Whether (true|false) to enable Selective Activation Recomputation. \"\n+ \"(useful only when `use_megatron_lm` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--megatron_lm_use_distributed_optimizer\",\n+ default=None,\n+ type=str,\n+ help=\"Decides Whether (true|false) to use distributed optimizer \"\n+ \"which shards optimizer state and gradients across Data Pralellel (DP) ranks. \"\n+ \"(useful only when `use_megatron_lm` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--megatron_lm_gradient_clipping\",\n+ default=1.0,\n+ type=float,\n+ help=\"Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). \"\n+ \"(useful only when `use_megatron_lm` flag is passed).\",\n+ )\n+\n parser.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n@@ -487,6 +547,22 @@ def multi_gpu_launcher(args):\n current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\n if args.fsdp_state_dict_type is not None:\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n+\n+ if args.use_megatron_lm:\n+ prefix = \"MEGATRON_LM_\"\n+ current_env[\"USE_MEGATRON_LM\"] = \"true\"\n+ current_env[prefix + \"TP_DEGREE\"] = str(args.megatron_lm_tp_degree)\n+ current_env[prefix + \"PP_DEGREE\"] = str(args.megatron_lm_pp_degree)\n+ current_env[prefix + \"GRADIENT_CLIPPING\"] = str(args.megatron_lm_gradient_clipping)\n+ if args.megatron_lm_num_micro_batches is not None:\n+ current_env[prefix + \"NUM_MICRO_BATCHES\"] = str(args.megatron_lm_num_micro_batches)\n+ if args.megatron_lm_sequence_parallelism is not None:\n+ current_env[prefix + \"SEQUENCE_PARALLELISM\"] = str(args.megatron_lm_sequence_parallelism)\n+ if args.megatron_lm_recompute_activations is not None:\n+ current_env[prefix + \"RECOMPUTE_ACTIVATIONS\"] = str(args.megatron_lm_recompute_activations)\n+ if args.megatron_lm_use_distributed_optimizer is not None:\n+ current_env[prefix + \"USE_DISTRIBUTED_OPTIMIZER\"] = str(args.megatron_lm_use_distributed_optimizer)\n+\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n if is_torch_version(\"<\", \"1.9.0\"):\n raise NotImplementedError(\"Multi-node training requires pytorch>=1.9.0\")\n@@ -825,12 +901,14 @@ def launch_command(args):\n and not args.use_deepspeed\n and not args.use_fsdp\n and not args.use_mps_device\n+ and not args.use_megatron_lm\n ):\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n args.use_mps_device = defaults.distributed_type == DistributedType.MPS\n+ args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM\n if not args.use_mps_device:\n if args.gpu_ids is None:\n if defaults.gpu_ids is not None:\n@@ -851,6 +929,8 @@ def launch_command(args):\n if \"fsdp\" not in arg_to_set:\n arg_to_set = \"fsdp_\" + arg_to_set\n setattr(args, arg_to_set, 
defaults.fsdp_config[k])\n+ for k in defaults.megatron_lm_config:\n+ setattr(args, k, defaults.megatron_lm_config[k])\n continue\n \n # Those args are handled separately\n@@ -901,6 +981,8 @@ def launch_command(args):\n deepspeed_launcher(args)\n elif args.use_fsdp and not args.cpu:\n multi_gpu_launcher(args)\n+ elif args.use_megatron_lm and not args.cpu:\n+ multi_gpu_launcher(args)\n elif args.multi_gpu and not args.cpu:\n multi_gpu_launcher(args)\n elif args.tpu and not args.cpu:\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex fac1b50e2..c4a015156 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -625,7 +625,7 @@ def prepare_data_loader(\n sampler_is_batch_sampler = False\n generator = getattr(dataloader, \"generator\", None)\n # No change if no multiprocess\n- if num_processes != 1 and not dispatch_batches:\n+ if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:\n if isinstance(new_dataset, IterableDataset):\n if getattr(dataloader.dataset, \"generator\", None) is not None:\n generator = dataloader.dataset.generator\ndiff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\nindex 2c67e24eb..dca093215 100644\n--- a/src/accelerate/logging.py\n+++ b/src/accelerate/logging.py\n@@ -15,6 +15,7 @@\n import logging\n \n from .state import AcceleratorState\n+from .utils import DistributedType\n \n \n class MultiProcessAdapter(logging.LoggerAdapter):\n@@ -28,7 +29,12 @@ class MultiProcessAdapter(logging.LoggerAdapter):\n @staticmethod\n def _should_log(main_process_only):\n \"Check if log should be performed\"\n- return not main_process_only or (main_process_only and AcceleratorState().local_process_index == 0)\n+ state = AcceleratorState()\n+ if state.distributed_type != DistributedType.MEGATRON_LM:\n+ process_index_flag = state.local_process_index == 0\n+ else:\n+ process_index_flag = state.process_index == state.num_processes - 1\n+ return not main_process_only or (main_process_only and process_index_flag)\n \n def log(self, level, msg, *args, **kwargs):\n \"\"\"\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex a700cfad6..82ea288f0 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -70,6 +70,7 @@ def __init__(\n cpu: bool = False,\n deepspeed_plugin=None,\n fsdp_plugin=None,\n+ megatron_lm_plugin=None,\n _from_accelerator: bool = False,\n **kwargs,\n ):\n@@ -162,6 +163,10 @@ def __init__(\n if self.mixed_precision != \"no\":\n fsdp_plugin.set_mixed_precision(self.mixed_precision)\n self.fsdp_plugin = fsdp_plugin\n+ if os.environ.get(\"USE_MEGATRON_LM\", \"false\") == \"true\":\n+ self.distributed_type = DistributedType.MEGATRON_LM\n+ megatron_lm_plugin.set_mixed_precision(self.mixed_precision)\n+ self.megatron_lm_plugin = megatron_lm_plugin\n elif get_int_from_env([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"], 1) > 1:\n self.distributed_type = DistributedType.MULTI_CPU\n if is_ccl_available() and get_int_from_env([\"CCL_WORKER_COUNT\"], 0) > 0:\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 4bc71ba83..b472ec556 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -13,6 +13,7 @@\n InitProcessGroupKwargs,\n KwargsHandler,\n LoggerType,\n+ MegatronLMPlugin,\n PrecisionType,\n RNGType,\n SageMakerDistributedType,\n@@ -28,6 +29,7 @@\n is_comet_ml_available,\n is_datasets_available,\n is_deepspeed_available,\n+ 
is_megatron_lm_available,\n is_rich_available,\n is_sagemaker_available,\n is_tensorboard_available,\n@@ -94,6 +96,23 @@\n )\n \n from .launch import PrepareForLaunch, _filter_args, get_launch_prefix\n+from .megatron_lm import (\n+ AbstractTrainStep,\n+ BertTrainStep,\n+ GPTTrainStep,\n+ MegatronEngine,\n+ MegatronLMDummyDataLoader,\n+ MegatronLMDummyScheduler,\n+ MegatronLMOptimizerWrapper,\n+ MegatronLMSchedulerWrapper,\n+ T5TrainStep,\n+ avg_losses_across_data_parallel_group,\n+)\n+from .megatron_lm import initialize as megatron_lm_initialize\n+from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader\n+from .megatron_lm import prepare_model as megatron_lm_prepare_model\n+from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer\n+from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler\n from .memory import find_executable_batch_size\n from .other import (\n extract_model_from_parallel,\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 9f6e30bec..8227fa5bc 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -16,6 +16,7 @@\n General namespace and dataclass related classes\n \"\"\"\n \n+import argparse\n import copy\n import enum\n import functools\n@@ -24,7 +25,8 @@\n import warnings\n from dataclasses import dataclass, field\n from datetime import timedelta\n-from typing import Any, Callable, Iterable, Optional\n+from distutils.util import strtobool\n+from typing import Any, Callable, Dict, Iterable, List, Optional\n \n import torch\n \n@@ -127,6 +129,7 @@ class DistributedType(str, enum.Enum):\n FSDP = \"FSDP\"\n TPU = \"TPU\"\n MPS = \"MPS\"\n+ MEGATRON_LM = \"MEGATRON_LM\"\n \n \n class SageMakerDistributedType(str, enum.Enum):\n@@ -644,3 +647,351 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\n # called from all ranks, though only rank0 has a valid param for full_osd\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\n optimizer.load_state_dict(sharded_osd)\n+\n+\n+@dataclass\n+class MegatronLMPlugin:\n+ \"\"\"\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. 
Also to enable selective\n+ activation recomputation and optimized fused kernels.\n+ \"\"\"\n+\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\n+ gradient_clipping: float = field(\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\n+ )\n+ sequence_parallelism: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable sequence parallelism\"},\n+ )\n+ recompute_activation: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable selective activation recomputation\"},\n+ )\n+ use_distributed_optimizer: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable distributed optimizer\"},\n+ )\n+ pipeline_model_parallel_split_rank: int = field(\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\n+ )\n+ num_layers_per_virtual_pipeline_stage: int = field(\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\n+ )\n+ train_iters: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Total number of iterations to train over all training runs. \"\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\n+ },\n+ )\n+ train_samples: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Total number of samples to train over all training runs. \"\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\n+ },\n+ )\n+ weight_decay_incr_style: str = field(\n+ default=\"constant\",\n+ metadata={\"help\": 'Weight decay increment function. choices=[\"constant\", \"linear\", \"cosine\"]. '},\n+ )\n+ start_weight_decay: float = field(\n+ default=None,\n+ metadata={\"help\": \"Initial weight decay coefficient for L2 regularization.\"},\n+ )\n+ end_weight_decay: float = field(\n+ default=None,\n+ metadata={\"help\": \"End of run weight decay coefficient for L2 regularization.\"},\n+ )\n+ lr_decay_style: str = field(\n+ default=\"linear\",\n+ metadata={\"help\": \"Learning rate decay function. choices=['constant', 'linear', 'cosine'].\"},\n+ )\n+ lr_decay_iters: int = field(\n+ default=None,\n+ metadata={\"help\": \"Number of iterations for learning rate decay. If None defaults to `train_iters`.\"},\n+ )\n+ lr_decay_samples: int = field(\n+ default=None,\n+ metadata={\"help\": \"Number of samples for learning rate decay. If None defaults to `train_samples`.\"},\n+ )\n+ lr_warmup_iters: int = field(\n+ default=None,\n+ metadata={\"help\": \"number of iterations to linearly warmup learning rate over.\"},\n+ )\n+ lr_warmup_samples: int = field(\n+ default=None,\n+ metadata={\"help\": \"number of samples to linearly warmup learning rate over.\"},\n+ )\n+ lr_warmup_fraction: float = field(\n+ default=None,\n+ metadata={\"help\": \"fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over.\"},\n+ )\n+ min_lr: float = field(\n+ default=0,\n+ metadata={\"help\": \"Minumum value for learning rate. 
The scheduler clip values below this threshold.\"},\n+ )\n+ consumed_samples: List[int] = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call.\"\n+ },\n+ )\n+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to disable weight decay.\"})\n+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to scale learning rate.\"})\n+ lr_mult: float = field(default=1.0, metadata={\"help\": \"Learning rate multiplier.\"})\n+ megatron_dataset_flag: bool = field(\n+ default=False,\n+ metadata={\"help\": \"Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format.\"},\n+ )\n+ seq_length: int = field(\n+ default=None,\n+ metadata={\"help\": \"Maximum sequence length to process.\"},\n+ )\n+ encoder_seq_length: int = field(\n+ default=None,\n+ metadata={\"help\": \"Maximum sequence length to process for the encoder.\"},\n+ )\n+ decoder_seq_length: int = field(\n+ default=None,\n+ metadata={\"help\": \"Maximum sequence length to process for the decoder.\"},\n+ )\n+ tensorboard_dir: str = field(\n+ default=None,\n+ metadata={\"help\": \"Path to save tensorboard logs.\"},\n+ )\n+ set_all_logging_options: bool = field(\n+ default=False,\n+ metadata={\"help\": \"Whether to set all logging options.\"},\n+ )\n+ custom_train_step_class: Optional[Any] = field(\n+ default=None,\n+ metadata={\"help\": \"Custom train step class.\"},\n+ )\n+ custom_train_step_kwargs: Optional[Dict[str, Any]] = field(\n+ default=None,\n+ metadata={\"help\": \"Custom train step kwargs.\"},\n+ )\n+ eval_iters: int = field(\n+ default=100, metadata={\"help\": \"Number of iterations to run for evaluation validation/test for.\"}\n+ )\n+ eval_interval: int = field(\n+ default=1000, metadata={\"help\": \"Interval between running evaluation on validation set.\"}\n+ )\n+\n+ def __post_init__(self):\n+ prefix = \"MEGATRON_LM_\"\n+ if self.tp_degree is None:\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\n+ if self.pp_degree is None:\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\n+ if self.num_micro_batches is None:\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\n+ if self.gradient_clipping is None:\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\n+ if self.recompute_activation is None:\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\n+ if self.use_distributed_optimizer is None:\n+ self.use_distributed_optimizer = (\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\n+ )\n+ if self.sequence_parallelism is None:\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\n+\n+ if self.pp_degree > 1 or self.use_distributed_optimizer:\n+ self.DDP_impl = \"local\"\n+ else:\n+ self.DDP_impl = \"torch\"\n+\n+ if self.consumed_samples is not None:\n+ if len(self.consumed_samples) == 1:\n+ self.consumed_samples.extend([0, 0])\n+ elif len(self.consumed_samples) == 2:\n+ self.consumed_samples.append(0)\n+\n+ self.megatron_lm_default_args = {\n+ \"tensor_model_parallel_size\": self.tp_degree,\n+ \"pipeline_model_parallel_size\": self.pp_degree,\n+ \"pipeline_model_parallel_split_rank\": self.pipeline_model_parallel_split_rank,\n+ \"num_layers_per_virtual_pipeline_stage\": 
self.num_layers_per_virtual_pipeline_stage,\n+ \"DDP_impl\": self.DDP_impl,\n+ \"use_distributed_optimizer\": self.use_distributed_optimizer,\n+ \"sequence_parallel\": self.sequence_parallelism,\n+ \"clip_grad\": self.gradient_clipping,\n+ \"num_micro_batches\": self.num_micro_batches,\n+ \"consumed_samples\": self.consumed_samples,\n+ \"no_wd_decay_cond\": self.no_wd_decay_cond,\n+ \"scale_lr_cond\": self.scale_lr_cond,\n+ \"lr_mult\": self.lr_mult,\n+ \"megatron_dataset_flag\": self.megatron_dataset_flag,\n+ \"eval_iters\": self.eval_iters,\n+ \"eval_interval\": self.eval_interval,\n+ }\n+ if self.recompute_activation:\n+ self.megatron_lm_default_args[\"recompute_granularity\"] = \"selective\"\n+ if self.tensorboard_dir is not None:\n+ self.megatron_lm_default_args[\"tensorboard_dir\"] = self.tensorboard_dir\n+ if self.set_all_logging_options:\n+ self.set_tensorboard_logging_options()\n+\n+ def set_network_size_args(self, model, batch_data=None):\n+ # Check if the model is either BERT, GPT or T5 else raise error\n+ # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'\n+ if \"megatron-bert\" in model.config.model_type.lower():\n+ model_type_name = \"bert\"\n+ num_layers = model.config.num_hidden_layers\n+ hidden_size = model.config.hidden_size\n+ num_attention_heads = model.config.num_attention_heads\n+ max_position_embeddings = model.config.max_position_embeddings\n+ num_labels = model.config.num_labels\n+ orig_vocab_size = model.config.vocab_size\n+ if \"maskedlm\" in model.__class__.__name__.lower():\n+ pretraining_flag = True\n+ if self.seq_length is not None:\n+ if self.encoder_seq_length is not None:\n+ warnings.warn(\"Both `seq_length` and `encoder_seq_length` are set. Using `encoder_seq_length`.\")\n+ self.seq_length = self.encoder_seq_length\n+ elif self.encoder_seq_length is not None:\n+ self.seq_length = self.encoder_seq_length\n+ elif batch_data is not None:\n+ self.seq_length = batch_data[\"input_ids\"].shape[1]\n+ else:\n+ self.seq_length = max_position_embeddings\n+ self.megatron_lm_default_args[\"seq_length\"] = self.seq_length\n+ elif \"gpt2\" in model.config.model_type.lower():\n+ model_type_name = \"gpt\"\n+ num_layers = model.config.n_layer\n+ hidden_size = model.config.n_embd\n+ num_attention_heads = model.config.n_head\n+ max_position_embeddings = model.config.n_positions\n+ orig_vocab_size = model.config.vocab_size\n+ pretraining_flag = True\n+ if self.seq_length is not None:\n+ if self.decoder_seq_length is not None:\n+ warnings.warn(\"Both `seq_length` and `decoder_seq_length` are set. 
Using `decoder_seq_length`.\")\n+ self.seq_length = self.decoder_seq_length\n+ elif self.decoder_seq_length is not None:\n+ self.seq_length = self.decoder_seq_length\n+ elif batch_data is not None:\n+ self.seq_length = batch_data[\"input_ids\"].shape[1]\n+ else:\n+ self.seq_length = max_position_embeddings\n+ self.megatron_lm_default_args[\"seq_length\"] = self.seq_length\n+ elif \"t5\" in model.config.model_type.lower():\n+ model_type_name = \"t5\"\n+ num_layers = model.config.num_layers\n+ hidden_size = model.config.d_model\n+ num_attention_heads = model.config.num_heads\n+ max_position_embeddings = model.config.n_positions if hasattr(model.config, \"n_positions\") else 1024\n+ orig_vocab_size = model.config.vocab_size\n+ pretraining_flag = True\n+ if self.encoder_seq_length is None:\n+ if batch_data is not None:\n+ self.encoder_seq_length = batch_data[\"input_ids\"].shape[1]\n+ else:\n+ self.encoder_seq_length = max_position_embeddings\n+ if self.decoder_seq_length is None:\n+ if batch_data is not None:\n+ self.decoder_seq_length = batch_data[\"labels\"].shape[1]\n+ else:\n+ self.decoder_seq_length = max_position_embeddings\n+\n+ self.megatron_lm_default_args[\"encoder_seq_length\"] = self.encoder_seq_length\n+ self.megatron_lm_default_args[\"decoder_seq_length\"] = self.decoder_seq_length\n+ else:\n+ raise ValueError(\n+ \"πŸ€— Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. \"\n+ \"Please check the model you are using is one of those.\"\n+ )\n+\n+ self.megatron_lm_default_args[\"model_type_name\"] = model_type_name\n+ self.megatron_lm_default_args[\"num_layers\"] = num_layers\n+ self.megatron_lm_default_args[\"hidden_size\"] = hidden_size\n+ self.megatron_lm_default_args[\"num_attention_heads\"] = num_attention_heads\n+ self.megatron_lm_default_args[\"max_position_embeddings\"] = max_position_embeddings\n+ self.megatron_lm_default_args[\"pretraining_flag\"] = pretraining_flag\n+ self.megatron_lm_default_args[\"orig_vocab_size\"] = orig_vocab_size\n+ self.megatron_lm_default_args[\"model_return_dict\"] = model.config.return_dict\n+ if model_type_name == \"bert\":\n+ self.megatron_lm_default_args[\"num_labels\"] = num_labels\n+\n+ def set_mixed_precision(self, mixed_precision):\n+ if mixed_precision == \"fp16\":\n+ self.megatron_lm_default_args[\"fp16\"] = True\n+ elif mixed_precision == \"bf16\":\n+ self.megatron_lm_default_args[\"bf16\"] = True\n+ self.DDP_impl = \"local\"\n+ self.megatron_lm_default_args[\"DDP_impl\"] = self.DDP_impl\n+\n+ def set_training_args(self, micro_batch_size, dp_degree):\n+ self.data_parallel_size = dp_degree\n+ self.micro_batch_size = micro_batch_size\n+ self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches\n+ self.megatron_lm_default_args[\"data_parallel_size\"] = self.data_parallel_size\n+ self.megatron_lm_default_args[\"micro_batch_size\"] = self.micro_batch_size\n+ self.megatron_lm_default_args[\"global_batch_size\"] = self.global_batch_size\n+\n+ def set_optimizer_type(self, optimizer):\n+ optimizer_name = optimizer.__class__.__name__.lower()\n+ if \"adam\" in optimizer_name:\n+ self.megatron_lm_default_args[\"optimizer\"] = \"adam\"\n+ self.megatron_lm_default_args[\"adam_beta1\"] = optimizer.defaults[\"betas\"][0]\n+ self.megatron_lm_default_args[\"adam_beta2\"] = optimizer.defaults[\"betas\"][1]\n+ self.megatron_lm_default_args[\"adam_eps\"] = optimizer.defaults[\"eps\"]\n+ elif \"sgd\" in optimizer_name:\n+ self.megatron_lm_default_args[\"optimizer\"] = \"sgd\"\n+ 
self.megatron_lm_default_args[\"sgd_momentum\"] = optimizer.defaults[\"momentum\"]\n+ else:\n+ raise ValueError(f\"Optimizer {optimizer_name} is not supported by Megatron-LM\")\n+\n+ self.megatron_lm_default_args[\"lr\"] = optimizer.defaults[\"lr\"]\n+ self.megatron_lm_default_args[\"weight_decay\"] = optimizer.defaults[\"weight_decay\"]\n+\n+ def set_scheduler_args(self, scheduler):\n+ if self.train_iters is None:\n+ self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args[\"data_parallel_size\"]\n+ if self.train_samples is not None:\n+ self.train_samples = None\n+ warnings.warn(\n+ \"Ignoring `train_samples` as `train_iters` based on scheduler is being used for training.\"\n+ )\n+ if self.lr_warmup_iters is None:\n+ self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args[\"data_parallel_size\"]\n+ if self.lr_warmup_samples is not None:\n+ warnings.warn(\n+ \"Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training.\"\n+ )\n+ self.lr_warmup_samples = 0\n+\n+ self.megatron_lm_default_args[\"train_iters\"] = self.train_iters\n+ self.megatron_lm_default_args[\"lr_warmup_iters\"] = self.lr_warmup_iters\n+ self.megatron_lm_default_args[\"train_samples\"] = self.train_samples\n+ self.megatron_lm_default_args[\"lr_warmup_samples\"] = self.lr_warmup_samples\n+ self.megatron_lm_default_args[\"lr_decay_iters\"] = self.lr_decay_iters\n+ self.megatron_lm_default_args[\"lr_decay_samples\"] = self.lr_decay_samples\n+ self.megatron_lm_default_args[\"lr_warmup_fraction\"] = self.lr_warmup_fraction\n+ self.megatron_lm_default_args[\"lr_decay_style\"] = self.lr_decay_style\n+ self.megatron_lm_default_args[\"weight_decay_incr_style\"] = self.weight_decay_incr_style\n+ self.megatron_lm_default_args[\"start_weight_decay\"] = self.start_weight_decay\n+ self.megatron_lm_default_args[\"end_weight_decay\"] = self.end_weight_decay\n+ self.megatron_lm_default_args[\"min_lr\"] = self.min_lr\n+\n+ def set_tensorboard_logging_options(self):\n+ from megatron.arguments import _add_logging_args\n+\n+ parser = argparse.ArgumentParser()\n+ parser = _add_logging_args(parser)\n+ logging_args = parser.parse_known_args()\n+ self.dataset_args = vars(logging_args[0])\n+ for key, value in self.dataset_args.items():\n+ if key.startswith(\"log_\"):\n+ self.megatron_lm_default_args[key] = True\n+ elif key.startswith(\"no_log_\"):\n+ self.megatron_lm_default_args[key.replace(\"no_\", \"\")] = True\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex eb5fda468..58009d03b 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -87,6 +87,10 @@ def is_bf16_available(ignore_tpu=False):\n return False\n \n \n+def is_megatron_lm_available():\n+ return importlib.util.find_spec(\"megatron\") is not None\n+\n+\n def is_transformers_available():\n return importlib.util.find_spec(\"transformers\") is not None\n \ndiff --git a/src/accelerate/utils/megatron_lm.py b/src/accelerate/utils/megatron_lm.py\nnew file mode 100644\nindex 000000000..cb988e360\n--- /dev/null\n+++ b/src/accelerate/utils/megatron_lm.py\n@@ -0,0 +1,1199 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import math\n+from abc import ABC\n+from functools import partial\n+\n+import torch\n+import torch.nn.functional as F\n+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n+\n+from ..optimizer import AcceleratedOptimizer\n+from ..scheduler import AcceleratedScheduler\n+from .imports import is_megatron_lm_available, is_transformers_available\n+from .operations import send_to_device\n+\n+\n+if is_transformers_available():\n+ from transformers.modeling_outputs import (\n+ CausalLMOutputWithCrossAttentions,\n+ Seq2SeqLMOutput,\n+ SequenceClassifierOutput,\n+ )\n+\n+\n+if is_megatron_lm_available():\n+ from megatron import (\n+ get_args,\n+ get_num_microbatches,\n+ get_tensorboard_writer,\n+ get_timers,\n+ mpu,\n+ print_rank_0,\n+ print_rank_last,\n+ )\n+ from megatron.arguments import _add_data_args, _add_validation_args, parse_args, validate_args\n+ from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint\n+ from megatron.data.data_samplers import MegatronPretrainingRandomSampler, MegatronPretrainingSampler\n+ from megatron.global_vars import set_global_variables\n+ from megatron.initialize import (\n+ _compile_dependencies,\n+ _init_autoresume,\n+ _set_random_seed,\n+ set_jit_fusion_options,\n+ write_args_to_tensorboard,\n+ )\n+ from megatron.model import BertModel, GPTModel, ModelType, T5Model\n+ from megatron.model.classification import Classification\n+ from megatron.optimizer import get_megatron_optimizer\n+ from megatron.schedules import get_forward_backward_func\n+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding\n+ from megatron.training import get_model, get_optimizer_param_scheduler, training_log\n+ from megatron.utils import (\n+ average_losses_across_data_parallel_group,\n+ calc_params_l2_norm,\n+ get_ltor_masks_and_position_ids,\n+ )\n+\n+\n+# model utilities\n+def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True):\n+ \"\"\"Build the model.\"\"\"\n+ args = get_args()\n+ mode = \"pre-training\" if args.pretraining_flag else \"fine-tuning\"\n+ if args.rank == 0:\n+ print(f\"Building {args.model_type_name} model in the {mode} mode.\")\n+ print(\n+ \"The Megatron LM model weights are initialized at random in `accelerator.prepare`. 
\"\n+ \"Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup.\"\n+ )\n+ if args.model_type_name == \"bert\":\n+ if args.pretraining_flag:\n+ num_tokentypes = 2 if args.bert_binary_head else 0\n+ model = BertModel(\n+ num_tokentypes=num_tokentypes,\n+ add_binary_head=args.bert_binary_head,\n+ parallel_output=True,\n+ pre_process=pre_process,\n+ post_process=post_process,\n+ )\n+ else:\n+\n+ model = Classification(\n+ num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process\n+ )\n+ elif args.model_type_name == \"gpt\":\n+ model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process)\n+ elif args.model_type_name == \"t5\":\n+ model = T5Model(\n+ num_tokentypes=0,\n+ parallel_output=True,\n+ pre_process=pre_process,\n+ post_process=post_process,\n+ add_encoder=add_encoder,\n+ add_decoder=add_decoder,\n+ )\n+ else:\n+ raise ValueError(f\"Unsupported model type: {args.model_type_name}\")\n+ return model\n+\n+\n+def prepare_model(accelerator):\n+ accelerator.print(\"Preparing model\")\n+ args = get_args()\n+ if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\n+ model_type = ModelType.encoder_or_decoder\n+ elif args.model_type_name == \"t5\":\n+ model_type = ModelType.encoder_and_decoder\n+ if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1:\n+ args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2\n+ model = get_model(model_provider_func, model_type)\n+ return model\n+\n+\n+# dataloader utilities\n+class MegatronLMDummyDataLoader:\n+ \"\"\"\n+ Dummy dataloader presents model parameters or param groups, this is primarily used to follow conventional training\n+\n+ Args:\n+ **dataset_kwargs: Megatron data arguments.\n+ \"\"\"\n+\n+ def __init__(self, **dataset_kwargs):\n+ parser = argparse.ArgumentParser()\n+ parser = _add_data_args(parser)\n+ parser = _add_validation_args(parser)\n+ data_args = parser.parse_known_args()\n+ self.dataset_args = vars(data_args[0])\n+ self.dataset_args.update(dataset_kwargs)\n+ self.dataset_args[\"megatron_dataset_flag\"] = True\n+\n+ def set_megatron_data_args(self):\n+ args = get_args()\n+ for key, value in self.dataset_args.items():\n+ setattr(args, key, value)\n+\n+ def get_train_valid_test_datasets_provider(self):\n+ def train_valid_test_datasets_provider(train_val_test_num_samples):\n+ \"\"\"Build train, valid, and test datasets.\"\"\"\n+ args = get_args()\n+ dataset_args = {\n+ \"data_prefix\": args.data_path,\n+ \"data_impl\": args.data_impl,\n+ \"splits_string\": args.split,\n+ \"train_valid_test_num_samples\": train_val_test_num_samples,\n+ \"skip_warmup\": (not args.mmap_warmup),\n+ \"seed\": args.seed,\n+ }\n+ if args.model_type_name == \"bert\":\n+ dataset_args.update(\n+ {\n+ \"max_seq_length\": args.seq_length,\n+ \"masked_lm_prob\": args.mask_prob,\n+ \"short_seq_prob\": args.short_seq_prob,\n+ \"binary_head\": args.bert_binary_head,\n+ }\n+ )\n+ elif args.model_type_name == \"gpt\":\n+ dataset_args.update(\n+ {\n+ \"seq_length\": args.seq_length,\n+ }\n+ )\n+ elif args.model_type_name == \"t5\":\n+ dataset_args.update(\n+ {\n+ \"max_seq_length\": args.encoder_seq_length,\n+ \"max_seq_length_dec\": args.decoder_seq_length,\n+ \"masked_lm_prob\": args.mask_prob,\n+ \"short_seq_prob\": args.short_seq_prob,\n+ \"dataset_type\": \"t5\",\n+ }\n+ )\n+ else:\n+ raise ValueError(f\"Unsupported model type: {args.model_type_name}\")\n+ if 
args.model_type_name == \"gpt\":\n+ from megatron.data.gpt_dataset import build_train_valid_test_datasets\n+ else:\n+ from megatron.data.dataset_utils import build_train_valid_test_datasets\n+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args)\n+ return train_ds, valid_ds, test_ds\n+\n+ return train_valid_test_datasets_provider\n+\n+ def build_pretraining_data_loader(self, dataset, consumed_samples):\n+ if dataset is None:\n+ return None\n+ args = get_args()\n+ micro_batch_size = args.micro_batch_size * args.num_micro_batches\n+\n+ # Megatron sampler\n+ if args.dataloader_type == \"single\":\n+ batch_sampler = MegatronPretrainingSampler(\n+ total_samples=len(dataset),\n+ consumed_samples=consumed_samples,\n+ micro_batch_size=micro_batch_size,\n+ data_parallel_rank=mpu.get_data_parallel_rank(),\n+ data_parallel_size=mpu.get_data_parallel_world_size(),\n+ )\n+ elif args.dataloader_type == \"cyclic\":\n+ batch_sampler = MegatronPretrainingRandomSampler(\n+ dataset,\n+ total_samples=len(dataset),\n+ consumed_samples=consumed_samples,\n+ micro_batch_size=micro_batch_size,\n+ data_parallel_rank=mpu.get_data_parallel_rank(),\n+ data_parallel_size=mpu.get_data_parallel_world_size(),\n+ data_sharding=args.data_sharding,\n+ )\n+ else:\n+ raise Exception(\"{} dataloader type is not supported.\".format(args.dataloader_type))\n+\n+ # Torch dataloader.\n+ return torch.utils.data.DataLoader(\n+ dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True\n+ )\n+\n+ def build_train_valid_test_data_iterators(self):\n+ def cyclic_iter(iter):\n+ while True:\n+ for x in iter:\n+ yield x\n+\n+ args = get_args()\n+\n+ (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)\n+\n+ print_rank_0(\"> building train, validation, and test datasets ...\")\n+\n+ # Backward compatibility, assume fixed batch size.\n+ if args.iteration > 0 and args.consumed_train_samples == 0:\n+ assert args.train_samples is None, \"only backward compatiblity support for iteration-based training\"\n+ args.consumed_train_samples = args.iteration * args.global_batch_size\n+ if args.iteration > 0 and args.consumed_valid_samples == 0:\n+ if args.train_samples is None:\n+ args.consumed_valid_samples = (\n+ (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size\n+ )\n+\n+ # Data loader only on rank 0 of each model parallel group.\n+ if mpu.get_tensor_model_parallel_rank() == 0:\n+\n+ # Number of train/valid/test samples.\n+ if args.train_samples:\n+ train_samples = args.train_samples\n+ else:\n+ train_samples = args.train_iters * args.global_batch_size\n+ eval_iters = (args.train_iters // args.eval_interval + 1) * args.eval_iters\n+ test_iters = args.eval_iters\n+ train_val_test_num_samples = [\n+ train_samples,\n+ eval_iters * args.global_batch_size,\n+ test_iters * args.global_batch_size,\n+ ]\n+ print_rank_0(\" > datasets target sizes (minimum size):\")\n+ print_rank_0(\" train: {}\".format(train_val_test_num_samples[0]))\n+ print_rank_0(\" validation: {}\".format(train_val_test_num_samples[1]))\n+ print_rank_0(\" test: {}\".format(train_val_test_num_samples[2]))\n+\n+ # Build the datasets.\n+ train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider()\n+ train_ds, valid_ds, test_ds = train_valid_test_datasets_provider(train_val_test_num_samples)\n+\n+ # Build dataloders.\n+ train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples)\n+ valid_dataloader = 
self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples)\n+ test_dataloader = self.build_pretraining_data_loader(test_ds, 0)\n+\n+ # Flags to know if we need to do training/validation/testing.\n+ do_train = train_dataloader is not None and args.train_iters > 0\n+ do_valid = valid_dataloader is not None and args.eval_iters > 0\n+ do_test = test_dataloader is not None and args.eval_iters > 0\n+ # Need to broadcast num_tokens and num_type_tokens.\n+ flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)])\n+ else:\n+ flags = torch.cuda.LongTensor([0, 0, 0])\n+\n+ # Broadcast num tokens.\n+ torch.distributed.broadcast(\n+ flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()\n+ )\n+ args.do_train = flags[0].item()\n+ args.do_valid = flags[1].item()\n+ args.do_test = flags[2].item()\n+\n+ # Build iterators.\n+ dl_type = args.dataloader_type\n+ assert dl_type in [\"single\", \"cyclic\"]\n+\n+ if train_dataloader is not None:\n+ train_data_iterator = (\n+ iter(train_dataloader) if dl_type == \"single\" else iter(cyclic_iter(train_dataloader))\n+ )\n+ else:\n+ train_data_iterator = None\n+\n+ if valid_dataloader is not None:\n+ valid_data_iterator = (\n+ iter(valid_dataloader) if dl_type == \"single\" else iter(cyclic_iter(valid_dataloader))\n+ )\n+ else:\n+ valid_data_iterator = None\n+\n+ if test_dataloader is not None:\n+ test_data_iterator = iter(test_dataloader) if dl_type == \"single\" else iter(cyclic_iter(test_dataloader))\n+ else:\n+ test_data_iterator = None\n+\n+ return train_data_iterator, valid_data_iterator, test_data_iterator\n+\n+\n+def prepare_data_loader(accelerator, dataloader):\n+ accelerator.print(\"Preparing dataloader\")\n+ args = get_args()\n+ if not args.megatron_dataset_flag:\n+ from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader\n+\n+ args = get_args()\n+ micro_batch_size = args.micro_batch_size * args.num_micro_batches\n+ kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS}\n+ if kwargs[\"batch_size\"] is None:\n+ if isinstance(kwargs[\"sampler\"], torch.utils.data.BatchSampler):\n+ kwargs[\"sampler\"].batch_size = micro_batch_size\n+ else:\n+ del kwargs[\"sampler\"]\n+ del kwargs[\"shuffle\"]\n+ del kwargs[\"batch_size\"]\n+ kwargs[\"batch_sampler\"].batch_size = micro_batch_size\n+ else:\n+ del kwargs[\"batch_sampler\"]\n+ kwargs[\"batch_size\"] = micro_batch_size\n+\n+ dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs)\n+ return prepare_data_loader(\n+ dataloader,\n+ accelerator.device,\n+ num_processes=mpu.get_data_parallel_world_size(),\n+ process_index=mpu.get_data_parallel_rank(),\n+ split_batches=accelerator.split_batches,\n+ put_on_device=True,\n+ rng_types=accelerator.rng_types.copy(),\n+ dispatch_batches=accelerator.dispatch_batches,\n+ )\n+ else:\n+ if args.consumed_samples is not None:\n+ (\n+ args.consumed_train_samples,\n+ args.consumed_valid_samples,\n+ args.consumed_test_samples,\n+ ) = args.consumed_samples\n+ else:\n+ args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0\n+ (\n+ train_data_iterator,\n+ valid_data_iterator,\n+ test_data_iterator,\n+ ) = dataloader.build_train_valid_test_data_iterators()\n+ return train_data_iterator, valid_data_iterator, test_data_iterator\n+\n+\n+# optimizer utilities\n+class MegatronLMOptimizerWrapper(AcceleratedOptimizer):\n+ def __init__(self, optimizer):\n+ super().__init__(optimizer, device_placement=False, 
scaler=None)\n+\n+ def zero_grad(self, set_to_none=None):\n+ pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed\n+\n+ def step(self):\n+ pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed\n+\n+ @property\n+ def step_was_skipped(self):\n+ \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n+ return self.optimizer.skipped_iter\n+\n+\n+def prepare_optimizer(accelerator, model):\n+ accelerator.print(\"Preparing optimizer\")\n+ args = get_args()\n+ optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult)\n+ return optimizer\n+\n+\n+# scheduler utilities\n+class MegatronLMDummyScheduler:\n+ \"\"\"\n+ Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training\n+ loop when scheduler config is specified in the deepspeed config file.\n+\n+ Args:\n+ optimizer (`torch.optim.optimizer.Optimizer`):\n+ The optimizer to wrap.\n+ total_num_steps (int):\n+ Total number of steps.\n+ warmup_num_steps (int):\n+ Number of steps for warmup.\n+ **kwargs:\n+ Other arguments.\n+ \"\"\"\n+\n+ def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):\n+ self.optimizer = optimizer\n+ self.total_num_steps = total_num_steps\n+ self.warmup_num_steps = warmup_num_steps\n+ self.kwargs = kwargs\n+\n+\n+class MegatronLMSchedulerWrapper(AcceleratedScheduler):\n+ def __init__(self, scheduler, optimizers):\n+ super().__init__(scheduler, optimizers)\n+\n+ def step(self, *args, **kwargs):\n+ return # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed\n+\n+\n+def prepare_scheduler(accelerator, optimizer, scheduler):\n+ accelerator.print(\"Preparing scheduler\")\n+ scheduler = get_optimizer_param_scheduler(optimizer)\n+ return scheduler\n+\n+\n+class AbstractTrainStep(ABC):\n+ \"\"\"Abstract class for batching, forward pass and loss handler.\"\"\"\n+\n+ def __init__(self, name):\n+ super().__init__()\n+ self.name = name\n+\n+ def get_batch_func(self):\n+ pass\n+\n+ def get_forward_step_func(self):\n+ pass\n+\n+ def get_loss_func(self):\n+ pass\n+\n+\n+class BertTrainStep(AbstractTrainStep):\n+ \"\"\"\n+ Bert train step class.\n+\n+ Args:\n+ args (`argparse.Namespace`): Megatron-LM arguments.\n+ \"\"\"\n+\n+ def __init__(self, args):\n+ super().__init__(\"BertTrainStep\")\n+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)\n+ self.loss_func = self.get_loss_func(args.pretraining_flag, args.num_labels)\n+ self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head)\n+ if not args.model_return_dict:\n+ self.model_output_class = None\n+ else:\n+ self.model_output_class = SequenceClassifierOutput\n+\n+ def get_batch_func(self, megatron_dataset_flag):\n+ def get_batch_megatron(data_iterator):\n+ \"\"\"Build the batch.\"\"\"\n+\n+ # Items and their type.\n+ keys = [\"text\", \"types\", \"labels\", \"is_random\", \"loss_mask\", \"padding_mask\"]\n+ datatype = torch.int64\n+\n+ # Broadcast data.\n+ if data_iterator is not None:\n+ data = next(data_iterator)\n+ else:\n+ data = None\n+ data_b = mpu.broadcast_data(keys, data, datatype)\n+\n+ # Unpack.\n+ tokens = data_b[\"text\"].long()\n+ types = data_b[\"types\"].long()\n+ sentence_order = data_b[\"is_random\"].long()\n+ loss_mask = data_b[\"loss_mask\"].float()\n+ lm_labels = data_b[\"labels\"].long()\n+ padding_mask = 
data_b[\"padding_mask\"].long()\n+\n+ return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask\n+\n+ def get_batch_transformer(data_iterator):\n+ \"\"\"Build the batch.\"\"\"\n+ data = next(data_iterator)\n+ data = send_to_device(data, torch.cuda.current_device())\n+\n+ # Unpack.\n+ tokens = data[\"input_ids\"].long()\n+ padding_mask = data[\"attention_mask\"].long()\n+ if \"token_type_ids\" in data:\n+ types = data[\"token_type_ids\"].long()\n+ else:\n+ types = None\n+ if \"labels\" in data:\n+ lm_labels = data[\"labels\"].long()\n+ loss_mask = (data[\"labels\"] != -100).to(torch.float)\n+ else:\n+ lm_labels = None\n+ loss_mask = None\n+ if \"next_sentence_label\" in data:\n+ sentence_order = data[\"next_sentence_label\"].long()\n+ else:\n+ sentence_order = None\n+\n+ return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask\n+\n+ if megatron_dataset_flag:\n+ return get_batch_megatron\n+ else:\n+ return get_batch_transformer\n+\n+ def get_loss_func(self, pretraining_flag, num_labels):\n+ def loss_func_pretrain(loss_mask, sentence_order, output_tensor):\n+ lm_loss_, sop_logits = output_tensor\n+\n+ lm_loss_ = lm_loss_.float()\n+ loss_mask = loss_mask.float()\n+ lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()\n+\n+ if sop_logits is not None:\n+ sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1)\n+ sop_loss = sop_loss.float()\n+ loss = lm_loss + sop_loss\n+ averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss])\n+ return loss, {\"lm loss\": averaged_losses[0], \"sop loss\": averaged_losses[1]}\n+\n+ else:\n+ loss = lm_loss\n+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])\n+ return loss, {\"lm loss\": averaged_losses[0]}\n+\n+ def loss_func_finetune(labels, logits):\n+ if num_labels == 1:\n+ # We are doing regression\n+ loss_fct = MSELoss()\n+ loss = loss_fct(logits.view(-1), labels.view(-1))\n+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n+ loss_fct = CrossEntropyLoss()\n+ loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))\n+ else:\n+ loss_fct = BCEWithLogitsLoss()\n+ loss = loss_fct(logits, labels)\n+ averaged_losses = average_losses_across_data_parallel_group([loss])\n+ return loss, {\"loss\": averaged_losses[0]}\n+\n+ if pretraining_flag:\n+ return loss_func_pretrain\n+ else:\n+ return loss_func_finetune\n+\n+ def get_forward_step_func(self, pretraining_flag, bert_binary_head):\n+ def forward_step(data_iterator, model):\n+ \"\"\"Forward step.\"\"\"\n+ tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator)\n+ if not bert_binary_head:\n+ types = None\n+ # Forward pass through the model.\n+ if pretraining_flag:\n+ output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels)\n+ return output_tensor, partial(self.loss_func, loss_mask, sentence_order)\n+ else:\n+ logits = model(tokens, padding_mask, tokentype_ids=types)\n+ return logits, partial(self.loss_func, labels)\n+\n+ return forward_step\n+\n+\n+class GPTTrainStep(AbstractTrainStep):\n+ \"\"\"\n+ GPT train step class.\n+\n+ Args:\n+ args (`argparse.Namespace`): Megatron-LM arguments.\n+ \"\"\"\n+\n+ def __init__(self, args):\n+ super().__init__(\"GPTTrainStep\")\n+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)\n+ self.loss_func = self.get_loss_func()\n+ self.forward_step = self.get_forward_step_func()\n+ self.eod_token = 
args.padded_vocab_size - 1\n+ if not args.model_return_dict:\n+ self.model_output_class = None\n+ else:\n+ self.model_output_class = CausalLMOutputWithCrossAttentions\n+\n+ def get_batch_func(self, megatron_dataset_flag):\n+ def get_batch_megatron(data_iterator):\n+ \"\"\"Generate a batch\"\"\"\n+ args = get_args()\n+\n+ # Items and their type.\n+ keys = [\"text\"]\n+ datatype = torch.int64\n+\n+ # Broadcast data.\n+ if data_iterator is not None:\n+ data = next(data_iterator)\n+ else:\n+ data = None\n+ data_b = mpu.broadcast_data(keys, data, datatype)\n+\n+ # Unpack.\n+ tokens_ = data_b[\"text\"].long()\n+ labels = tokens_[:, 1:].contiguous()\n+ tokens = tokens_[:, :-1].contiguous()\n+\n+ # Get the masks and postition ids.\n+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(\n+ tokens, self.eod_token, args.reset_position_ids, args.reset_attention_mask, args.eod_mask_loss\n+ )\n+\n+ return tokens, labels, loss_mask, attention_mask, position_ids\n+\n+ def get_batch_transformer(data_iterator):\n+ data = next(data_iterator)\n+ data = {\"input_ids\": data[\"input_ids\"]}\n+ data = send_to_device(data, torch.cuda.current_device())\n+\n+ tokens_ = data[\"input_ids\"].long()\n+ padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token\n+ tokens_ = torch.concat([tokens_, padding], dim=1)\n+ labels = tokens_[:, 1:].contiguous()\n+ tokens = tokens_[:, :-1].contiguous()\n+ # Get the masks and postition ids.\n+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(\n+ tokens, self.eod_token, False, False, True\n+ )\n+ return tokens, labels, loss_mask, attention_mask, position_ids\n+\n+ if megatron_dataset_flag:\n+ return get_batch_megatron\n+ else:\n+ return get_batch_transformer\n+\n+ def get_loss_func(self):\n+ def loss_func(loss_mask, output_tensor):\n+ losses = output_tensor.float()\n+ loss_mask = loss_mask.view(-1).float()\n+ loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()\n+\n+ # Reduce loss for logging.\n+ averaged_loss = average_losses_across_data_parallel_group([loss])\n+\n+ return loss, {\"lm loss\": averaged_loss[0]}\n+\n+ return loss_func\n+\n+ def get_forward_step_func(self):\n+ def forward_step(data_iterator, model):\n+ \"\"\"Forward step.\"\"\"\n+ # Get the batch.\n+ tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)\n+ output_tensor = model(tokens, position_ids, attention_mask, labels=labels)\n+\n+ return output_tensor, partial(self.loss_func, loss_mask)\n+\n+ return forward_step\n+\n+\n+class T5TrainStep(AbstractTrainStep):\n+ \"\"\"\n+ T5 train step class.\n+\n+ Args:\n+ args (`argparse.Namespace`): Megatron-LM arguments.\n+ \"\"\"\n+\n+ def __init__(self, args):\n+ super().__init__(\"T5TrainStep\")\n+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)\n+ self.loss_func = self.get_loss_func()\n+ self.forward_step = self.get_forward_step_func()\n+ if not args.model_return_dict:\n+ self.model_output_class = None\n+ else:\n+ self.model_output_class = Seq2SeqLMOutput\n+\n+ @staticmethod\n+ def attn_mask_postprocess(attention_mask):\n+ # We create a 3D attention mask from a 2D tensor mask.\n+ # [b, 1, s]\n+ attention_mask_b1s = attention_mask.unsqueeze(1)\n+ # [b, s, 1]\n+ attention_mask_bs1 = attention_mask.unsqueeze(2)\n+ # [b, s, s]\n+ attention_mask_bss = attention_mask_b1s * attention_mask_bs1\n+ # Convert attention mask to binary:\n+ extended_attention_mask = attention_mask_bss < 0.5\n+ return extended_attention_mask\n+\n+ 
@staticmethod\n+ def get_decoder_mask(seq_length, device):\n+ attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device))\n+ attention_mask = attention_mask < 0.5\n+ return attention_mask\n+\n+ @staticmethod\n+ def get_enc_dec_mask(attention_mask, dec_seq_length, device):\n+ batch_size, _ = attention_mask.shape\n+ # We create a 3D attention mask from a 2D tensor mask.\n+ # [b, 1, s]\n+ attention_mask_b1s = attention_mask.unsqueeze(1)\n+ # [b, s, 1]\n+ attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device)\n+ attention_mask_bss = attention_mask_bs1 * attention_mask_b1s\n+ extended_attention_mask = attention_mask_bss < 0.5\n+ return extended_attention_mask\n+\n+ def get_batch_func(self, megatron_dataset_flag):\n+ def get_batch_megatron(data_iterator):\n+ \"\"\"Build the batch.\"\"\"\n+\n+ keys = [\"text_enc\", \"text_dec\", \"labels\", \"loss_mask\", \"enc_mask\", \"dec_mask\", \"enc_dec_mask\"]\n+ datatype = torch.int64\n+\n+ # Broadcast data.\n+ if data_iterator is not None:\n+ data = next(data_iterator)\n+ else:\n+ data = None\n+ data_b = mpu.broadcast_data(keys, data, datatype)\n+\n+ # Unpack.\n+ tokens_enc = data_b[\"text_enc\"].long()\n+ tokens_dec = data_b[\"text_dec\"].long()\n+ labels = data_b[\"labels\"].long()\n+ loss_mask = data_b[\"loss_mask\"].float()\n+\n+ enc_mask = data_b[\"enc_mask\"] < 0.5\n+ dec_mask = data_b[\"dec_mask\"] < 0.5\n+ enc_dec_mask = data_b[\"enc_dec_mask\"] < 0.5\n+\n+ return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask\n+\n+ def get_batch_transformer(data_iterator):\n+ \"\"\"Build the batch.\"\"\"\n+ data = next(data_iterator)\n+ data = send_to_device(data, torch.cuda.current_device())\n+\n+ tokens_enc = data[\"input_ids\"].long()\n+ labels = data[\"labels\"].long()\n+ loss_mask = (labels != -100).to(torch.float)\n+ if \"decoder_input_ids\" in data:\n+ tokens_dec = data[\"decoder_input_ids\"].long()\n+ else:\n+ tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long)\n+ tokens_dec[..., 1:] = labels[..., :-1].clone()\n+ tokens_dec[..., 0] = 0\n+ tokens_dec.masked_fill_(tokens_dec == -100, 0)\n+ enc_mask = T5TrainStep.attn_mask_postprocess(data[\"attention_mask\"].long())\n+ dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device)\n+ enc_dec_mask = T5TrainStep.get_enc_dec_mask(\n+ data[\"attention_mask\"].long(), tokens_dec.shape[1], tokens_dec.device\n+ )\n+\n+ return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask\n+\n+ if megatron_dataset_flag:\n+ return get_batch_megatron\n+ else:\n+ return get_batch_transformer\n+\n+ def get_loss_func(self):\n+ def loss_func(loss_mask, output_tensor):\n+ lm_loss_ = output_tensor.float()\n+ lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()\n+\n+ loss = lm_loss\n+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])\n+\n+ return loss, {\"lm loss\": averaged_losses[0]}\n+\n+ return loss_func\n+\n+ def get_forward_step_func(self):\n+ def forward_step(data_iterator, model):\n+ \"\"\"Forward step.\"\"\"\n+ # Get the batch.\n+ tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch(\n+ data_iterator\n+ )\n+ # Forward model lm_labels\n+ output_tensor = model(\n+ tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels\n+ )\n+\n+ return output_tensor, partial(self.loss_func, loss_mask)\n+\n+ return forward_step\n+\n+\n+# intialize megatron 
setup\n+def initialize(accelerator, extra_args_provider=None, args_defaults={}):\n+ accelerator.print(\"Initializing Megatron-LM\")\n+ assert torch.cuda.is_available(), \"Megatron requires CUDA.\"\n+\n+ # Parse arguments\n+ args = parse_args(extra_args_provider, ignore_unknown_args=True)\n+\n+ # Set defaults\n+ for key, value in args_defaults.items():\n+ if getattr(args, key, None) is not None:\n+ if args.rank == 0:\n+ print(\n+ \"WARNING: overriding default arguments for {key}:{v} \\\n+ with {key}:{v2}\".format(\n+ key=key, v=getattr(args, key), v2=value\n+ ),\n+ flush=True,\n+ )\n+ setattr(args, key, value)\n+\n+ if args.use_checkpoint_args or args_defaults.get(\"use_checkpoint_args\", False):\n+ assert args.load is not None, \"--use-checkpoints-args requires --load argument\"\n+ load_args_from_checkpoint(args)\n+\n+ validate_args(args)\n+\n+ # set global args, build tokenizer, and set adlr-autoresume,\n+ # tensorboard-writer, and timers.\n+ set_global_variables(args)\n+\n+ # torch.distributed initialization\n+ def finish_mpu_init():\n+ args = get_args()\n+ # Pytorch distributed.\n+ device_count = torch.cuda.device_count()\n+ args.rank = torch.distributed.get_rank()\n+ args.world_size = torch.distributed.get_world_size()\n+ if device_count > 0:\n+ device = args.rank % device_count\n+ if args.local_rank is not None:\n+ assert args.local_rank == device, \"expected local-rank to be the same as rank % device-count.\"\n+ else:\n+ args.local_rank = device\n+\n+ # Set the tensor model-parallel, pipeline model-parallel, and\n+ # data-parallel communicators.\n+ if mpu.model_parallel_is_initialized():\n+ print(\"model parallel is already initialized\")\n+ else:\n+ mpu.initialize_model_parallel(\n+ args.tensor_model_parallel_size,\n+ args.pipeline_model_parallel_size,\n+ args.virtual_pipeline_model_parallel_size,\n+ args.pipeline_model_parallel_split_rank,\n+ )\n+\n+ # Random seeds for reproducibility.\n+ if args.rank == 0:\n+ print(\"> setting random seeds to {} ...\".format(args.seed))\n+ _set_random_seed(args.seed, args.data_parallel_random_init)\n+\n+ args = get_args()\n+\n+ # Megatron's MPU is the master. 
Complete initialization right away.\n+ finish_mpu_init()\n+\n+ # Autoresume.\n+ _init_autoresume()\n+\n+ # Compile dependencies.\n+ _compile_dependencies()\n+\n+ # Set pytorch JIT layer fusion options and warmup JIT functions.\n+ set_jit_fusion_options()\n+ args = get_args()\n+ args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args)\n+ if args.model_type_name == \"bert\" and args.pretraining_flag and args.num_labels == 2:\n+ args.bert_binary_head = True\n+ else:\n+ args.bert_binary_head = False\n+ args.iteration = 0\n+\n+\n+class MegatronEngine(torch.nn.Module):\n+ \"\"\"\n+ Megatron-LM model wrapper\n+\n+ Args:\n+ accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use.\n+ model: Megatron-LM model\n+ optimizer: Megatron-LM optimizer\n+ lr_scheduler: Megatron-LM lr scheduler\n+ \"\"\"\n+\n+ def __init__(self, accelerator, model, optimizer, scheduler):\n+ super(MegatronEngine, self).__init__()\n+ self.module = model\n+ self.base_model = model[0]\n+ self.optimizer = optimizer\n+ self.scheduler = scheduler\n+ args = get_args()\n+ if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None:\n+ self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class(\n+ args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs\n+ )\n+ elif args.model_type_name == \"bert\":\n+ self.train_step_handler = BertTrainStep(args)\n+ elif args.model_type_name == \"gpt\":\n+ self.train_step_handler = GPTTrainStep(args)\n+ elif args.model_type_name == \"t5\":\n+ self.train_step_handler = T5TrainStep(args)\n+ else:\n+ raise ValueError(f\"Unsupported model type: {args.model_type_name}\")\n+ self.optimizer.skipped_iter = False\n+\n+ # Tracking loss.\n+ self.total_loss_dict = {}\n+ self.eval_total_loss_dict = {}\n+ self.iteration = 0\n+ self.report_memory_flag = True\n+ if args.tensorboard_dir is not None:\n+ write_args_to_tensorboard()\n+\n+ def train(self):\n+ for model_module in self.module:\n+ model_module.train()\n+ self.log_eval_results()\n+\n+ def eval(self):\n+ for model_module in self.module:\n+ model_module.eval()\n+\n+ def train_step(self, **batch_data):\n+ \"\"\"\n+ Training step for Megatron-LM\n+\n+ Args:\n+ batch_data (:obj:`dict`): The batch data to train on.\n+ \"\"\"\n+\n+ args = get_args()\n+ timers = get_timers()\n+\n+ if len(batch_data) > 0:\n+ data_chunks = []\n+ if args.num_micro_batches > 1:\n+ for i in range(0, args.num_micro_batches):\n+ data_chunks.append(\n+ {\n+ k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size]\n+ for k, v in batch_data.items()\n+ }\n+ )\n+ else:\n+ data_chunks = [batch_data]\n+\n+ if len(self.module) > 1:\n+ batch_data_iterator = (\n+ [iter(data_chunks) for _ in range(len(self.module))]\n+ if len(batch_data) > 0\n+ else [None] * len(self.module)\n+ )\n+ else:\n+ batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None\n+\n+ # Set grad to zero.\n+ if args.DDP_impl == \"local\" and args.use_contiguous_buffers_in_local_ddp:\n+ for partition in self.module:\n+ partition.zero_grad_buffer()\n+ self.optimizer.zero_grad()\n+\n+ # Forward pass.\n+ forward_backward_func = get_forward_backward_func()\n+ losses_reduced = forward_backward_func(\n+ self.train_step_handler.forward_step,\n+ batch_data_iterator,\n+ self.module,\n+ self.optimizer,\n+ None,\n+ forward_only=False,\n+ )\n+\n+ # Empty unused memory.\n+ if args.empty_unused_memory_level >= 1:\n+ torch.cuda.empty_cache()\n+\n+ # Reduce gradients.\n+ timers(\"backward-reduce-model-grads\").start()\n+ 
self.optimizer.reduce_model_grads(args, timers)\n+ timers(\"backward-reduce-model-grads\").stop()\n+\n+ # Update parameters.\n+ timers(\"optimizer\").start()\n+ update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers)\n+ timers(\"optimizer\").stop()\n+\n+ # Gather params.\n+ if update_successful:\n+ timers(\"backward-gather-model-params\").start()\n+ self.optimizer.gather_model_params(args, timers)\n+ timers(\"backward-gather-model-params\").stop()\n+\n+ # Update learning rate.\n+ if update_successful:\n+ if self.scheduler is not None:\n+ increment = get_num_microbatches() * args.micro_batch_size * args.data_parallel_size\n+ self.scheduler.step(increment=increment)\n+ skipped_iter = 0\n+ else:\n+ skipped_iter = 1\n+\n+ self.optimizer.skipped_iter = not update_successful\n+\n+ # Empty unused memory.\n+ if args.empty_unused_memory_level >= 2:\n+ torch.cuda.empty_cache()\n+\n+ args.consumed_train_samples += (\n+ mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()\n+ )\n+\n+ if mpu.is_pipeline_last_stage(ignore_virtual=True):\n+ # Average loss across microbatches.\n+ loss_reduced = {}\n+ for key in losses_reduced[0]:\n+ losses_reduced_for_key = [x[key] for x in losses_reduced]\n+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\n+ return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad\n+ return {}, skipped_iter, grad_norm, num_zeros_in_grad\n+\n+ def eval_step(self, **batch_data):\n+ \"\"\"\n+ Evaluation step for Megatron-LM\n+\n+ Args:\n+ batch_data (:obj:`dict`): The batch data to evaluate on.\n+ \"\"\"\n+\n+ args = get_args()\n+ data_chunks = []\n+ if args.num_micro_batches > 1:\n+ for i in range(0, args.num_micro_batches):\n+ data_chunks.append(\n+ {k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items()}\n+ )\n+ else:\n+ data_chunks = [batch_data]\n+\n+ if len(self.module) > 1:\n+ batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))]\n+ else:\n+ batch_data_iterator = iter(data_chunks)\n+ forward_backward_func = get_forward_backward_func()\n+ loss_dicts = forward_backward_func(\n+ self.train_step_handler.forward_step,\n+ batch_data_iterator,\n+ self.module,\n+ optimizer=None,\n+ timers=None,\n+ forward_only=True,\n+ )\n+ # Empty unused memory\n+ if args.empty_unused_memory_level >= 1:\n+ torch.cuda.empty_cache()\n+\n+ args.consumed_valid_samples += (\n+ mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()\n+ )\n+\n+ if mpu.is_pipeline_last_stage(ignore_virtual=True):\n+ # Average loss across microbatches.\n+ loss_reduced = {}\n+ for key in loss_dicts[0]:\n+ losses_reduced_for_key = [x[key] for x in loss_dicts]\n+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)\n+ return loss_reduced\n+ else:\n+ return {}\n+\n+ def forward(self, **batch_data):\n+ # During training, we use train_step()\n+ # model(**batch_data) performs following operations by delegating it to `self.train_step`:\n+ # 1. Prepare **batch_data for Tendor, Pipeline and Model Parallelism\n+ # 2. Set grad to zero.\n+ # 3. forward pass and backward pass using Pipeline Parallelism\n+ # 4. Empty unused memory.\n+ # 5. Reduce gradients.\n+ # 6. Update parameters.\n+ # 7. Gather params when using Distributed Optimizer (Data Parallelism).\n+ # 8. Update learning rate if scheduler is specified.\n+ # 9. Empty unused memory.\n+ # 10. 
Average loss across microbatches and across DP ranks.\n+ #\n+ # During evaluation, we use eval_step()\n+ args = get_args()\n+ if self.module[0].training:\n+ loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data)\n+ self.iteration += 1\n+ if args.tensorboard_dir is not None:\n+ # Logging.\n+ loss_scale = self.optimizer.get_loss_scale().item()\n+ params_norm = None\n+ if args.log_params_norm:\n+ params_norm = calc_params_l2_norm(self.model)\n+ self.report_memory_flag = training_log(\n+ loss_dict,\n+ self.total_loss_dict,\n+ self.optimizer.param_groups[0][\"lr\"],\n+ self.iteration,\n+ loss_scale,\n+ self.report_memory_flag,\n+ skipped_iter,\n+ grad_norm,\n+ params_norm,\n+ num_zeros_in_grad,\n+ )\n+ else:\n+ loss_dict = self.eval_step(**batch_data)\n+ if args.tensorboard_dir is not None:\n+ for key in loss_dict:\n+ self.eval_total_loss_dict[key] = (\n+ self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]\n+ )\n+ self.eval_total_loss_dict[key + \"_num_iters\"] = self.eval_total_loss_dict.get(\n+ key + \"_num_iters\", torch.cuda.FloatTensor([0.0])\n+ ) + torch.cuda.FloatTensor([1.0])\n+\n+ loss = torch.tensor(0.0, device=args.local_rank)\n+ for key in loss_dict:\n+ loss += loss_dict[key]\n+ # loss = reduce(loss)\n+ if self.train_step_handler.model_output_class is not None:\n+ return self.train_step_handler.model_output_class(loss=loss)\n+ return loss\n+\n+ def log_eval_results(self):\n+ args = get_args()\n+ if args.tensorboard_dir is None or self.iteration == 0:\n+ return\n+ args = get_args()\n+ writer = get_tensorboard_writer()\n+ string = f\"validation loss at iteration {self.iteration} | \"\n+ for key in self.eval_total_loss_dict:\n+ if key.endswith(\"_num_iters\"):\n+ continue\n+ value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + \"_num_iters\"]\n+ string += f\"{key} value: {value} | \"\n+ ppl = math.exp(min(20, value.item()))\n+ if args.pretraining_flag:\n+ string += f\"{key} PPL: {ppl} | \"\n+ if writer:\n+ writer.add_scalar(f\"{key} validation\", value.item(), self.iteration)\n+ if args.pretraining_flag:\n+ writer.add_scalar(f\"{key} validation ppl\", ppl, self.iteration)\n+\n+ length = len(string) + 1\n+ print_rank_last(\"-\" * length)\n+ print_rank_last(string)\n+ print_rank_last(\"-\" * length)\n+ self.eval_total_loss_dict = {}\n+\n+ def save_checkpoint(self, output_dir):\n+ self.log_eval_results()\n+ args = get_args()\n+ args.save = output_dir\n+ torch.distributed.barrier()\n+ save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler)\n+ torch.distributed.barrier()\n+\n+ def load_checkpoint(self, input_dir):\n+ args = get_args()\n+ args.load = input_dir\n+ args.consumed_train_samples = 0\n+ args.consumed_valid_samples = 0\n+ torch.distributed.barrier()\n+ iteration = load_checkpoint(self.module, self.optimizer, self.scheduler)\n+ torch.distributed.barrier()\n+ self.iteration = iteration\n+ if args.fp16 and self.iteration == 0:\n+ self.optimizer.reload_model_params()\n+\n+\n+# other utilities\n+def avg_losses_across_data_parallel_group(losses):\n+ \"\"\"\n+ Average losses across data parallel group.\n+\n+ Args:\n+ losses (List[Tensor]): List of losses to average across data parallel group.\n+ \"\"\"\n+\n+ return average_losses_across_data_parallel_group(losses)\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex 3ba46a03f..aef1fd589 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -224,6 +224,7 @@ 
def gather(tensor):\n DistributedType.DEEPSPEED,\n DistributedType.MULTI_GPU,\n DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,\n ]:\n return _gpu_gather(tensor)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n@@ -261,6 +262,7 @@ def gather_object(object: Any):\n DistributedType.DEEPSPEED,\n DistributedType.MULTI_GPU,\n DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,\n ]:\n return _gpu_gather_object(object)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n@@ -304,6 +306,7 @@ def broadcast(tensor, from_process: int = 0):\n DistributedType.DEEPSPEED,\n DistributedType.MULTI_GPU,\n DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,\n ]:\n return _gpu_broadcast(tensor, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n@@ -332,6 +335,7 @@ def broadcast_object_list(object_list, from_process: int = 0):\n DistributedType.DEEPSPEED,\n DistributedType.MULTI_GPU,\n DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,\n ]:\n torch.distributed.broadcast_object_list(object_list, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n@@ -452,6 +456,7 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\n DistributedType.DEEPSPEED,\n DistributedType.MULTI_GPU,\n DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,\n ]:\n torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)\n return cloned_tensor\ndiff --git a/src/accelerate/utils/random.py b/src/accelerate/utils/random.py\nindex e95ed03bf..8dc149a02 100644\n--- a/src/accelerate/utils/random.py\n+++ b/src/accelerate/utils/random.py\n@@ -64,7 +64,12 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n- elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU, DistributedType.FSDP]:\n+ elif state.distributed_type in [\n+ DistributedType.DEEPSPEED,\n+ DistributedType.MULTI_GPU,\n+ DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,\n+ ]:\n rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n rng_state = rng_state.cpu()\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 9197070c8..05688f2b5 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -39,6 +39,7 @@\n \"automatic_gradient_accumulation.py\",\n \"fsdp_with_peak_mem_tracking.py\",\n \"deepspeed_with_config_support.py\",\n+ \"megatron_lm_gpt_pretraining.py\",\n ]\n \n \n", "code_comments": [ { "body": "```suggestion\r\n self.megatron_lm_default_args = {\r\n```\r\nAlso you use `megatronlm_plugin` with no `_` between `megatron` and `lm`., just flagging this so you stay consistent (I prefer `megatron_lm` personally.", "diff_hunk": "@@ -643,3 +645,142 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\n # called from all ranks, though only rank0 has a valid param for full_osd\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\n optimizer.load_state_dict(sharded_osd)\n+\n+\n+@dataclass\n+class MegatronLMPlugin:\n+ \"\"\"\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. 
Also to enable selective\n+ activation recomputation and optimized fused kernels.\n+ \"\"\"\n+\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\n+ gradient_clipping: float = field(\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\n+ )\n+ sequence_parallelism: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable sequence parallelism\"},\n+ )\n+ recompute_activation: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable selective activation recomputation\"},\n+ )\n+ use_distributed_optimizer: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable distributed optimizer\"},\n+ )\n+ pipeline_model_parallel_split_rank: int = field(\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\n+ )\n+ num_layers_per_virtual_pipeline_stage: int = field(\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\n+ )\n+\n+ def __post_init__(self):\n+ prefix = \"MEGATRON_\"\n+ if self.tp_degree is None:\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\n+ if self.pp_degree is None:\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\n+ if self.num_micro_batches is None:\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\n+ if self.gradient_clipping is None:\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\n+ if self.recompute_activation is None:\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\n+ if self.use_distributed_optimizer is None:\n+ self.use_distributed_optimizer = (\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\n+ )\n+ if self.sequence_parallelism is None:\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\n+\n+ if self.pp_degree > 1:\n+ self.DDP_impl = \"local\"\n+ else:\n+ self.DDP_impl = \"torch\"\n+\n+ self.megtron_lm_default_args = {", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -643,3 +645,142 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\n # called from all ranks, though only rank0 has a valid param for full_osd\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\n optimizer.load_state_dict(sharded_osd)\n+\n+\n+@dataclass\n+class MegatronLMPlugin:\n+ \"\"\"\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. 
Also to enable selective\n+ activation recomputation and optimized fused kernels.\n+ \"\"\"\n+\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\n+ gradient_clipping: float = field(\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\n+ )\n+ sequence_parallelism: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable sequence parallelism\"},\n+ )\n+ recompute_activation: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable selective activation recomputation\"},\n+ )\n+ use_distributed_optimizer: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable distributed optimizer\"},\n+ )\n+ pipeline_model_parallel_split_rank: int = field(\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\n+ )\n+ num_layers_per_virtual_pipeline_stage: int = field(\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\n+ )\n+\n+ def __post_init__(self):\n+ prefix = \"MEGATRON_\"\n+ if self.tp_degree is None:\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\n+ if self.pp_degree is None:\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\n+ if self.num_micro_batches is None:\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\n+ if self.gradient_clipping is None:\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\n+ if self.recompute_activation is None:\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\n+ if self.use_distributed_optimizer is None:\n+ self.use_distributed_optimizer = (\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\n+ )\n+ if self.sequence_parallelism is None:\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\n+\n+ if self.pp_degree > 1:\n+ self.DDP_impl = \"local\"\n+ else:\n+ self.DDP_impl = \"torch\"\n+\n+ self.megtron_lm_default_args = {", "from_author": true }, { "body": "I don't believe all of those should be changed just for MegatronLM.", "diff_hunk": "@@ -1032,7 +1174,7 @@ def wait_for_everyone(self):\n \"\"\"\n wait_for_everyone()\n \n- @on_main_process\n+ @on_last_process", "from_author": false }, { "body": "Here the message should be more in the lines of \"The Megatron-LM only supports those\"", "diff_hunk": "@@ -643,3 +645,291 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\n # called from all ranks, though only rank0 has a valid param for full_osd\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\n optimizer.load_state_dict(sharded_osd)\n+\n+\n+@dataclass\n+class MegatronLMPlugin:\n+ \"\"\"\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. 
Also to enable selective\n+ activation recomputation and optimized fused kernels.\n+ \"\"\"\n+\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\n+ gradient_clipping: float = field(\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\n+ )\n+ sequence_parallelism: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable sequence parallelism\"},\n+ )\n+ recompute_activation: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable selective activation recomputation\"},\n+ )\n+ use_distributed_optimizer: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable distributed optimizer\"},\n+ )\n+ pipeline_model_parallel_split_rank: int = field(\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\n+ )\n+ num_layers_per_virtual_pipeline_stage: int = field(\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\n+ )\n+ train_iters: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Total number of iterations to train over all training runs. \"\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\n+ },\n+ )\n+ train_samples: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Total number of samples to train over all training runs. \"\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\n+ },\n+ )\n+ weight_decay_incr_style: str = field(\n+ default=\"constant\",\n+ metadata={\n+ \"help\": 'Weight decay increment function. choices=[\"constant\", \"linear\", \"cosine\"]. '\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ start_weight_decay: float = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Initial weight decay coefficient for L2 regularization. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ end_weight_decay: float = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"End of run weight decay coefficient for L2 regularization. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_decay_style: str = field(\n+ default=\"linear\",\n+ metadata={\n+ \"help\": \"Learning rate decay function. choices=['constant', 'linear', 'cosine']. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_decay_iters: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Number of iterations for learning rate decay. If None defaults to `train_iters`. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_decay_samples: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Number of samples for learning rate decay. If None defaults to `train_samples`. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_warmup_iters: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"number of iterations to linearly warmup learning rate over. 
\"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_warmup_samples: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"number of samples to linearly warmup learning rate over. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_warmup_fraction: float = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ min_lr: float = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Minumum value for learning rate. The scheduler clip values below this threshold. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ consumed_samples: List[int] = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call.\"\n+ },\n+ )\n+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to disable weight decay.\"})\n+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to scale learning rate.\"})\n+ lr_mult: float = field(default=1.0, metadata={\"help\": \"Learning rate multiplier.\"})\n+ megatron_dataset_flag: bool = field(\n+ default=False,\n+ metadata={\"help\": \"Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format.\"},\n+ )\n+\n+ def __post_init__(self):\n+ prefix = \"MEGATRON_\"\n+ if self.tp_degree is None:\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\n+ if self.pp_degree is None:\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\n+ if self.num_micro_batches is None:\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\n+ if self.gradient_clipping is None:\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\n+ if self.recompute_activation is None:\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\n+ if self.use_distributed_optimizer is None:\n+ self.use_distributed_optimizer = (\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\n+ )\n+ if self.sequence_parallelism is None:\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\n+\n+ if self.pp_degree > 1:\n+ self.DDP_impl = \"local\"\n+ else:\n+ self.DDP_impl = \"torch\"\n+\n+ if self.consumed_samples is not None:\n+ if len(self.consumed_samples) == 1:\n+ self.consumed_samples.extend([0, 0])\n+ elif len(self.consumed_samples) == 2:\n+ self.consumed_samples.append(0)\n+\n+ self.megatron_lm_default_args = {\n+ \"tensor_model_parallel_size\": self.tp_degree,\n+ \"pipeline_model_parallel_size\": self.pp_degree,\n+ \"pipeline_model_parallel_split_rank\": self.pipeline_model_parallel_split_rank,\n+ \"num_layers_per_virtual_pipeline_stage\": self.num_layers_per_virtual_pipeline_stage,\n+ \"DDP_impl\": self.DDP_impl,\n+ \"use_distributed_optimizer\": self.use_distributed_optimizer,\n+ \"recompute_activations\": self.recompute_activation,\n+ \"sequence_parallel\": self.sequence_parallelism,\n+ \"clip_grad\": self.gradient_clipping,\n+ \"num_micro_batches\": self.num_micro_batches,\n+ \"consumed_samples\": self.consumed_samples,\n+ \"no_wd_decay_cond\": self.no_wd_decay_cond,\n+ \"scale_lr_cond\": self.scale_lr_cond,\n+ \"lr_mult\": self.lr_mult,\n+ \"megatron_dataset_flag\": 
self.megatron_dataset_flag,\n+ }\n+\n+ def set_network_size_args(self, model):\n+ # Check if the model is either BERT, GPT or T5 else raise error\n+ # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'\n+ if \"bert\" in model.__class__.__name__.lower():\n+ model_type_name = \"bert\"\n+ num_layers = model.config.num_hidden_layers\n+ hidden_size = model.config.hidden_size\n+ num_attention_heads = model.config.num_attention_heads\n+ max_position_embeddings = model.config.max_position_embeddings\n+ num_labels = model.config.num_labels\n+ orig_vocab_size = model.config.vocab_size\n+ if \"maskedlm\" in model.__class__.__name__.lower():\n+ pretraining_flag = True\n+\n+ elif \"gpt\" in model.__class__.__name__.lower():\n+ model_type_name = \"gpt\"\n+ num_layers = model.config.n_layer\n+ hidden_size = model.config.n_embd\n+ num_attention_heads = model.config.n_head\n+ max_position_embeddings = model.config.n_positions\n+ orig_vocab_size = model.config.vocab_size\n+ pretraining_flag = True\n+ elif \"t5\" in model.__class__.__name__.lower():\n+ model_type_name = \"t5\"\n+ num_layers = model.config.num_layers\n+ hidden_size = model.config.d_model\n+ num_attention_heads = model.config.num_heads\n+ max_position_embeddings = model.config.n_positions\n+ orig_vocab_size = model.config.vocab_size\n+ pretraining_flag = True\n+ else:\n+ raise ValueError(\"Model is not BERT, GPT or T5. Please check the model you are using.\")", "from_author": false }, { "body": "You should check the `model_type` attribute instead of the class name IMO.", "diff_hunk": "@@ -643,3 +645,291 @@ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_ind\n # called from all ranks, though only rank0 has a valid param for full_osd\n sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\n optimizer.load_state_dict(sharded_osd)\n+\n+\n+@dataclass\n+class MegatronLMPlugin:\n+ \"\"\"\n+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective\n+ activation recomputation and optimized fused kernels.\n+ \"\"\"\n+\n+ tp_degree: int = field(default=None, metadata={\"help\": \"tensor parallelism degree.\"})\n+ pp_degree: int = field(default=None, metadata={\"help\": \"pipeline parallelism degree.\"})\n+ num_micro_batches: int = field(default=None, metadata={\"help\": \"number of micro-batches.\"})\n+ gradient_clipping: float = field(\n+ default=None, metadata={\"help\": \"gradient clipping value based on global L2 Norm (0 to disable)\"}\n+ )\n+ sequence_parallelism: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable sequence parallelism\"},\n+ )\n+ recompute_activation: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable selective activation recomputation\"},\n+ )\n+ use_distributed_optimizer: bool = field(\n+ default=None,\n+ metadata={\"help\": \"enable distributed optimizer\"},\n+ )\n+ pipeline_model_parallel_split_rank: int = field(\n+ default=None, metadata={\"help\": \"Rank where encoder and decoder should be split.\"}\n+ )\n+ num_layers_per_virtual_pipeline_stage: int = field(\n+ default=None, metadata={\"help\": \"Number of layers per virtual pipeline stage.\"}\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the micro_batch_size\"},\n+ )\n+ train_iters: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Total number of iterations to train over all training runs. 
\"\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\n+ },\n+ )\n+ train_samples: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Total number of samples to train over all training runs. \"\n+ \"Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`\"\n+ },\n+ )\n+ weight_decay_incr_style: str = field(\n+ default=\"constant\",\n+ metadata={\n+ \"help\": 'Weight decay increment function. choices=[\"constant\", \"linear\", \"cosine\"]. '\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ start_weight_decay: float = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Initial weight decay coefficient for L2 regularization. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ end_weight_decay: float = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"End of run weight decay coefficient for L2 regularization. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_decay_style: str = field(\n+ default=\"linear\",\n+ metadata={\n+ \"help\": \"Learning rate decay function. choices=['constant', 'linear', 'cosine']. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_decay_iters: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Number of iterations for learning rate decay. If None defaults to `train_iters`. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_decay_samples: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Number of samples for learning rate decay. If None defaults to `train_samples`. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_warmup_iters: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"number of iterations to linearly warmup learning rate over. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_warmup_samples: int = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"number of samples to linearly warmup learning rate over. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ lr_warmup_fraction: float = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over. \"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ min_lr: float = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Minumum value for learning rate. The scheduler clip values below this threshold. 
\"\n+ \"Only applicable when using `MegatronLMDummyScheduler`.\"\n+ },\n+ )\n+ consumed_samples: List[int] = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call.\"\n+ },\n+ )\n+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to disable weight decay.\"})\n+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={\"help\": \"Condition to scale learning rate.\"})\n+ lr_mult: float = field(default=1.0, metadata={\"help\": \"Learning rate multiplier.\"})\n+ megatron_dataset_flag: bool = field(\n+ default=False,\n+ metadata={\"help\": \"Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format.\"},\n+ )\n+\n+ def __post_init__(self):\n+ prefix = \"MEGATRON_\"\n+ if self.tp_degree is None:\n+ self.tp_degree = int(os.environ.get(prefix + \"TP_DEGREE\", 1))\n+ if self.pp_degree is None:\n+ self.pp_degree = int(os.environ.get(prefix + \"PP_DEGREE\", 1))\n+ if self.num_micro_batches is None:\n+ self.num_micro_batches = int(os.environ.get(prefix + \"NUM_MICRO_BATCHES\", 1))\n+ if self.gradient_clipping is None:\n+ self.gradient_clipping = float(os.environ.get(prefix + \"GRADIENT_CLIPPING\", 1.0))\n+ if self.recompute_activation is None:\n+ self.recompute_activation = strtobool(os.environ.get(prefix + \"RECOMPUTE_ACTIVATION\", \"False\")) == 1\n+ if self.use_distributed_optimizer is None:\n+ self.use_distributed_optimizer = (\n+ strtobool(os.environ.get(prefix + \"USE_DISTRIBUTED_OPTIMIZER\", \"False\")) == 1\n+ )\n+ if self.sequence_parallelism is None:\n+ self.sequence_parallelism = strtobool(os.environ.get(prefix + \"SEQUENCE_PARALLELISM\", \"False\")) == 1\n+\n+ if self.pp_degree > 1:\n+ self.DDP_impl = \"local\"\n+ else:\n+ self.DDP_impl = \"torch\"\n+\n+ if self.consumed_samples is not None:\n+ if len(self.consumed_samples) == 1:\n+ self.consumed_samples.extend([0, 0])\n+ elif len(self.consumed_samples) == 2:\n+ self.consumed_samples.append(0)\n+\n+ self.megatron_lm_default_args = {\n+ \"tensor_model_parallel_size\": self.tp_degree,\n+ \"pipeline_model_parallel_size\": self.pp_degree,\n+ \"pipeline_model_parallel_split_rank\": self.pipeline_model_parallel_split_rank,\n+ \"num_layers_per_virtual_pipeline_stage\": self.num_layers_per_virtual_pipeline_stage,\n+ \"DDP_impl\": self.DDP_impl,\n+ \"use_distributed_optimizer\": self.use_distributed_optimizer,\n+ \"recompute_activations\": self.recompute_activation,\n+ \"sequence_parallel\": self.sequence_parallelism,\n+ \"clip_grad\": self.gradient_clipping,\n+ \"num_micro_batches\": self.num_micro_batches,\n+ \"consumed_samples\": self.consumed_samples,\n+ \"no_wd_decay_cond\": self.no_wd_decay_cond,\n+ \"scale_lr_cond\": self.scale_lr_cond,\n+ \"lr_mult\": self.lr_mult,\n+ \"megatron_dataset_flag\": self.megatron_dataset_flag,\n+ }\n+\n+ def set_network_size_args(self, model):\n+ # Check if the model is either BERT, GPT or T5 else raise error\n+ # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'\n+ if \"bert\" in model.__class__.__name__.lower():", "from_author": false }, { "body": "To be cleaned up?", "diff_hunk": "@@ -0,0 +1,821 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+from abc import ABC\n+from functools import partial\n+\n+import torch\n+import torch.nn.functional as F\n+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n+\n+from ..optimizer import AcceleratedOptimizer\n+from ..scheduler import AcceleratedScheduler\n+from .imports import is_megatron_lm_available, is_transformers_available\n+from .operations import recursively_apply\n+\n+\n+if is_transformers_available():\n+ from transformers.modeling_outputs import (\n+ CausalLMOutputWithCrossAttentions,\n+ Seq2SeqLMOutput,\n+ SequenceClassifierOutput,\n+ )\n+ from transformers.models.bert.modeling_bert import BertForPreTrainingOutput\n+\n+\n+if is_megatron_lm_available():\n+ from megatron import get_args, get_num_microbatches, get_timers, mpu\n+ from megatron.arguments import _add_data_args\n+ from megatron.data.data_samplers import build_pretraining_data_loader\n+ from megatron.data.dataset_utils import build_train_valid_test_datasets\n+ from megatron.initialize import (\n+ get_tensor_model_parallel_group,\n+ get_tensor_model_parallel_src_rank,\n+ initialize_megatron,\n+ set_jit_fusion_options,\n+ )\n+ from megatron.model import BertModel, GPTModel, ModelType, T5Model\n+ from megatron.model.classification import Classification\n+ from megatron.model.module import MegatronModule\n+ from megatron.optimizer import get_megatron_optimizer\n+ from megatron.schedules import get_forward_backward_func\n+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding\n+ from megatron.training import build_train_valid_test_data_iterators, get_model, get_optimizer_param_scheduler\n+ from megatron.utils import average_losses_across_data_parallel_group, get_ltor_masks_and_position_ids\n+\n+\n+def model_provider_func(accelerator, pre_process=True, post_process=True, add_encoder=True, add_decoder=True):\n+ \"\"\"Build the model.\"\"\"\n+ args = get_args()\n+ mode = \"pre-training\" if args.pretraining_flag else \"fine-tuning\"\n+ accelerator.print(f\"Building {args.model_type_name} model in the {mode} mode.\")\n+ if args.model_type_name == \"bert\":\n+ if args.pretraining_flag:\n+ num_tokentypes = 2 if args.bert_binary_head else 0\n+ model = BertModel(\n+ num_tokentypes=num_tokentypes,\n+ add_binary_head=args.bert_binary_head,\n+ parallel_output=True,\n+ pre_process=pre_process,\n+ post_process=post_process,\n+ )\n+ else:\n+\n+ model = Classification(\n+ num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process\n+ )\n+ elif args.model_type_name == \"gpt2\":\n+ model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process)\n+ elif args.model_type_name == \"t5\":\n+ model = T5Model(\n+ num_tokentypes=0,\n+ parallel_output=True,\n+ pre_process=pre_process,\n+ post_process=post_process,\n+ add_encoder=add_encoder,\n+ add_decoder=add_decoder,\n+ )\n+ else:\n+ raise ValueError(f\"Unknown model type: 
{args.model_type_name}\")\n+ return model\n+\n+\n+def prepare_data_loader(accelerator, dataloader, consumed_samples_index=-1, consumed_samples=0):\n+ accelerator.print(\"Preparing dataloader\")\n+ args = get_args()\n+ if not args.megatron_dataset_flag:\n+ if args.consumed_samples is not None:\n+ consumed_samples = args.consumed_samples[consumed_samples_index]\n+ return build_pretraining_data_loader(dataloader.dataset, consumed_samples)\n+ else:\n+ if args.consumed_samples is not None:\n+ (\n+ args.consumed_train_samples,\n+ args.consumed_valid_samples,\n+ args.consumed_test_samples,\n+ ) = args.consumed_samples\n+ else:\n+ args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0\n+ train_data_iterator, valid_data_iterator, test_data_iterator = build_train_valid_test_data_iterators(\n+ MegatronLMDummyDataLoader.get_train_valid_test_datasets_provider\n+ )\n+ return train_data_iterator, valid_data_iterator, test_data_iterator\n+\n+\n+def prepare_model(accelerator):\n+ accelerator.print(\"Preparing model\")\n+ args = get_args()\n+ if args.model_type_name == \"bert\" or args.model_type_name == \"gpt\":\n+ model_type = ModelType.encoder_or_decoder\n+ elif args.model_type_name == \"t5\":\n+ model_type = ModelType.encoder_and_decoder\n+ model = get_model(model_provider_func, model_type)\n+ return model\n+\n+\n+def prepare_optimizer(accelerator, model):\n+ accelerator.print(\"Preparing optimizer\")\n+ args = get_args()\n+ optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult)\n+ return optimizer\n+\n+\n+def prepare_scheduler(accelerator, optimizer, scheduler, is_dummy_scheduler):\n+ accelerator.print(\"Preparing scheduler\")\n+ if is_dummy_scheduler:\n+ scheduler = get_optimizer_param_scheduler(optimizer)\n+ else:\n+ scheduler.optimizer = optimizer\n+ if isinstance(scheduler, torch.optim.lr_scheduler.LambdaLR):\n+ scheduler = scheduler.__class__(optimizer, scheduler.lr_lambdas[0])\n+ return scheduler\n+\n+\n+def initialize(accelerator, extra_args_provider=None, args_defaults={}):\n+ accelerator.print(\"Initializing Megatron-LM\")\n+ # Initalize and get arguments\n+ initialize_megatron(extra_args_provider=extra_args_provider, args_defaults=args_defaults, ignore_unknown_args=True)\n+ # Set pytorch JIT layer fusion options and warmup JIT functions.\n+ set_jit_fusion_options()\n+ args = get_args()\n+ args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args)\n+ if args.model_type_name == \"bert\" and args.pretraining_flag and args.num_labels == 2:\n+ args.bert_binary_head = True\n+ # if args.virtual_pipeline_model_parallel_size is not None:\n+ # raise Exception(\"Virtual pipeline model parallelism is not supported in Accelerate\")", "from_author": false }, { "body": "The aliases should be done in the `.utils` module: if we have another integration like this, we won't be able to have both names.", "diff_hunk": "@@ -71,6 +73,21 @@\n DummyScheduler,\n )\n \n+if is_megatron_lm_available():\n+ from .utils import (\n+ MegatronEngine,\n+ MegatronLMDummyDataLoader,\n+ MegatronLMDummyScheduler,\n+ MegatronLMOptimizerWrapper,\n+ MegatronLMSchedulerWrapper,\n+ )\n+ from .utils import initialize as megatron_lm_initialize\n+ from .utils import prepare_data_loader as megatron_lm_prepare_data_loader\n+ from .utils import prepare_model as megatron_lm_prepare_model\n+ from .utils import prepare_optimizer as megatron_lm_prepare_optimizer\n+ from .utils import prepare_scheduler as megatron_lm_prepare_scheduler", 
"from_author": false }, { "body": "AS said above, those should probably have the `megatron_lm` prefix here.", "diff_hunk": "@@ -94,6 +96,23 @@\n )\n \n from .launch import PrepareForLaunch, _filter_args, get_launch_prefix\n+from .megatron_lm import (\n+ AbstractTrainStep,\n+ BertTrainStep,\n+ GPTTrainStep,\n+ MegatronEngine,\n+ MegatronLMDummyDataLoader,\n+ MegatronLMDummyScheduler,\n+ MegatronLMOptimizerWrapper,\n+ MegatronLMSchedulerWrapper,\n+ T5TrainStep,\n+ avg_losses_across_data_parallel_group,\n+ initialize,\n+ prepare_data_loader,\n+ prepare_model,\n+ prepare_optimizer,\n+ prepare_scheduler,", "from_author": false }, { "body": "Nit: in a followup PR it would be nice to have all of those in one constant.", "diff_hunk": "@@ -224,6 +224,7 @@ def gather(tensor):\n DistributedType.DEEPSPEED,\n DistributedType.MULTI_GPU,\n DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -71,6 +73,21 @@\n DummyScheduler,\n )\n \n+if is_megatron_lm_available():\n+ from .utils import (\n+ MegatronEngine,\n+ MegatronLMDummyDataLoader,\n+ MegatronLMDummyScheduler,\n+ MegatronLMOptimizerWrapper,\n+ MegatronLMSchedulerWrapper,\n+ )\n+ from .utils import initialize as megatron_lm_initialize\n+ from .utils import prepare_data_loader as megatron_lm_prepare_data_loader\n+ from .utils import prepare_model as megatron_lm_prepare_model\n+ from .utils import prepare_optimizer as megatron_lm_prepare_optimizer\n+ from .utils import prepare_scheduler as megatron_lm_prepare_scheduler", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -94,6 +96,23 @@\n )\n \n from .launch import PrepareForLaunch, _filter_args, get_launch_prefix\n+from .megatron_lm import (\n+ AbstractTrainStep,\n+ BertTrainStep,\n+ GPTTrainStep,\n+ MegatronEngine,\n+ MegatronLMDummyDataLoader,\n+ MegatronLMDummyScheduler,\n+ MegatronLMOptimizerWrapper,\n+ MegatronLMSchedulerWrapper,\n+ T5TrainStep,\n+ avg_losses_across_data_parallel_group,\n+ initialize,\n+ prepare_data_loader,\n+ prepare_model,\n+ prepare_optimizer,\n+ prepare_scheduler,", "from_author": true }, { "body": "Can be for another PR, but would be nice to explain the difference between those kinds of parallelism and what is parallelized exactly.", "diff_hunk": "@@ -0,0 +1,507 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+\n+# Megatron-LM\n+\n+[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.\n+It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based\n+Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).\n+For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM).\n+\n+## What is integrated?\n+\n+Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\n+of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\n+\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\n+\n+b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \n+Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \n+Layers are distributed uniformly across PP stages.\n+\n+c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\n+\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.", "from_author": false }, { "body": "PR #758 to resolve this", "diff_hunk": "@@ -224,6 +224,7 @@ def gather(tensor):\n DistributedType.DEEPSPEED,\n DistributedType.MULTI_GPU,\n DistributedType.FSDP,\n+ DistributedType.MEGATRON_LM,", "from_author": true }, { "body": "PR #759 to address this.", "diff_hunk": "@@ -0,0 +1,507 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+\n+# Megatron-LM\n+\n+[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.\n+It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based\n+Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).\n+For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM).\n+\n+## What is integrated?\n+\n+Accelerate integrates following feature of Megatron-LM to enable large scale pre-training/finetuning\n+of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):\n+\n+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks\n+\n+b. 
**Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. \n+Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. \n+Layers are distributed uniformly across PP stages.\n+\n+c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.\n+\n+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces memory footprint by sharding optimizer states and gradients across DP ranks.", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/667", "pr_id": 1040964232 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 4991ddbe4..353ccf894 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -140,7 +140,7 @@ class Accelerator:\n A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\n are created. See [kwargs](kwargs) for more information.\n \n- **Attributes:**\n+ **Available attributes:**\n \n - **device** (`torch.device`) -- The device to use.\n - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.\n@@ -463,6 +463,28 @@ def no_sync(self, model):\n Args:\n model (`torch.nn.Module`):\n PyTorch Module that was prepared with `Accelerator.prepare`\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator()\n+ >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)\n+ >>> input_a = next(iter(dataloader))\n+ >>> input_b = next(iter(dataloader))\n+\n+ >>> with accelerator.no_sync():\n+ ... outputs = model(input_a)\n+ ... loss = loss_func(outputs)\n+ ... accelerator.backward(loss)\n+ ... # No synchronization across processes, only accumulate gradients\n+ >>> outputs = model(input_b)\n+ >>> accelerator.backward(loss)\n+ >>> # Synchronization across all processes\n+ >>> optimizer.step()\n+ >>> optimizer.zero_grad()\n+ ```\n \"\"\"\n context = contextlib.nullcontext\n if self.use_distributed:\n@@ -492,6 +514,24 @@ def accumulate(self, model):\n Args:\n model (`torch.nn.Module`):\n PyTorch Module that was prepared with `Accelerator.prepare`\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)\n+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)\n+\n+ >>> with accelerator.accumulate():\n+ ... for input, output in dataloader:\n+ ... outputs = model(input)\n+ ... loss = loss_func(outputs)\n+ ... loss.backward()\n+ ... optimizer.step()\n+ ... scheduler.step()\n+ ... 
optimizer.zero_grad()\n+ ```\n \"\"\"\n self._do_sync()\n if self.sync_gradients:\n@@ -873,7 +913,10 @@ def prepare_scheduler(self, scheduler):\n \n def backward(self, loss, **kwargs):\n \"\"\"\n- Use `accelerator.backward(loss)` in lieu of `loss.backward()`.\n+ Scales the gradients in accordance to `Accelerator.gradient_accumulation_steps` and calls the correct\n+ `backward()` based on the configuration.\n+\n+ Should be used in lieu of `loss.backward()`.\n \"\"\"\n loss /= self.gradient_accumulation_steps\n if self.distributed_type == DistributedType.DEEPSPEED:\n@@ -906,6 +949,24 @@ def unscale_gradients(self, optimizer=None):\n def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n \"\"\"\n Should be used in place of `torch.nn.utils.clip_grad_norm_`.\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)\n+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)\n+\n+ >>> for (input, target) in dataloader:\n+ ... optimizer.zero_grad()\n+ ... output = model(input)\n+ ... loss = loss_func(output, target)\n+ ... accelerator.backward(loss)\n+ ... if accelerator.sync_gradients:\n+ ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)\n+ ... optimizer.step()\n+ ```\n \"\"\"\n if self.distributed_type == DistributedType.FSDP:\n self.unscale_gradients()\n@@ -923,6 +984,24 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n def clip_grad_value_(self, parameters, clip_value):\n \"\"\"\n Should be used in place of `torch.nn.utils.clip_grad_value_`.\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate import Accelerator\n+\n+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)\n+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)\n+\n+ >>> for (input, target) in dataloader:\n+ ... optimizer.zero_grad()\n+ ... output = model(input)\n+ ... loss = loss_func(output, target)\n+ ... accelerator.backward(loss)\n+ ... if accelerator.sync_gradients:\n+ ... accelerator.clip_grad_value_(model.parameters(), clip_value)\n+ ... optimizer.step()\n+ ```\n \"\"\"\n if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]:\n raise Exception(\"DeepSpeed and FSDP do not support `clip_grad_value_`. 
Use `clip_grad_norm_` instead.\")\n@@ -1124,6 +1203,13 @@ def save_state(self, output_dir: str):\n \"\"\"\n Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects.\n \n+ <Tip>\n+\n+ Should only be used when wanting to save a checkpoint during training and restoring the state in the same\n+ environment.\n+\n+ </Tip>\n+\n Args:\n output_dir (`str` or `os.PathLike`):\n The name of the folder to save all relevant weights and states.\n@@ -1179,6 +1265,12 @@ def load_state(self, input_dir: str):\n \"\"\"\n Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.\n \n+ <Tip>\n+\n+ Should only be used in conjunction with [`Accelerator.save_state`].\n+\n+ </Tip>\n+\n Args:\n input_dir (`str` or `os.PathLike`):\n The name of the folder all relevant weights and states were saved in.\ndiff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\nindex 2128cdca7..2c67e24eb 100644\n--- a/src/accelerate/logging.py\n+++ b/src/accelerate/logging.py\n@@ -49,15 +49,20 @@ def get_logger(name: str):\n \n If a log should be called on all processes, pass `main_process_only=False`\n \n- E.g.\n- ```python\n- logger.info(\"My log\", main_process_only=False)\n- logger.debug(\"My log\", main_process_only=False)\n- ```\n-\n Args:\n name (`str`):\n The name for the logger, such as `__file__`\n+\n+ Example:\n+\n+ ```python\n+ >>> from accelerate.logging import get_logger\n+\n+ >>> logger = get_logger(__name__)\n+\n+ >>> logger.info(\"My log\", main_process_only=False)\n+ >>> logger.debug(\"My log\", main_process_only=True)\n+ ```\n \"\"\"\n logger = logging.getLogger(name)\n return MultiProcessAdapter(logger, {})\ndiff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\nindex 75695091e..4ae91a657 100644\n--- a/src/accelerate/scheduler.py\n+++ b/src/accelerate/scheduler.py\n@@ -28,7 +28,7 @@ class AcceleratedScheduler:\n to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed\n precision training)\n \n- When performing gradient accumulation scheduler lengths should not be changed accordingly, accelerate will always\n+ When performing gradient accumulation scheduler lengths should not be changed accordingly, Accelerate will always\n step the scheduler to account for it.\n \n Args:\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 051cf8bc6..a700cfad6 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -50,14 +50,14 @@ class AcceleratorState:\n \"\"\"\n Singleton class that has information about the current training environment.\n \n- **Attributes:**\n+ **Available attributes:**\n \n - **device** (`torch.device`) -- The device to use.\n - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently\n in use.\n - **local_process_index** (`int`) -- The index of the current process on the current server.\n- - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision. 
If you are using\n- mixed precision, define if you want to use FP16 or BF16 (bfloat16) as the floating point.\n+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type\n+ of mixed precision being performed.\n - **num_processes** (`int`) -- The number of processes currently launched in parallel.\n - **process_index** (`int`) -- The index of the current process.\n \"\"\"\n@@ -278,10 +278,11 @@ class GradientState:\n \"\"\"\n Singleton class that has information related to gradient synchronization for gradient accumulation\n \n- **Attributes:**\n+ **Available attributes:**\n \n - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader\n - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader\n+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices\n \"\"\"\n \n _shared_state = {}\n@@ -310,5 +311,5 @@ def _set_end_of_dataloader(self, end_of_dataloader):\n self.end_of_dataloader = end_of_dataloader\n \n def _set_remainder(self, remainder):\n- \"Private function that sets the number of remaining samples at the end of the dataloader\"\n+ \"Private function that sets the number of remaining samples at the end of the dataloader. Users should not have to call this.\"\n self.remainder = remainder\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/666", "pr_id": 1040133897 }, { "diff": "diff --git a/README.md b/README.md\nindex f15f442a7..deddf2986 100644\n--- a/README.md\n+++ b/README.md\n@@ -243,3 +243,16 @@ pip install accelerate\n - FP16 with native AMP (apex on the roadmap)\n - DeepSpeed support (Experimental)\n - PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)\n+\n+## Citing πŸ€— Accelerate\n+\n+If you use πŸ€— Accelerate in your publication, please cite it by using the following BibTeX entry.\n+\n+```bibtex\n+@Misc{accelerate,\n+ title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},\n+ author = {Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar},\n+ howpublished = {\\url{https://github.com/huggingface/accelerate}},\n+ year = {2022}\n+}\n+```\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/660", "pr_id": 1037561043 }, { "diff": "diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 839a7a2a9..028e431e1 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -16,9 +16,12 @@\n # Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n \n import os\n+import time\n from abc import ABCMeta, abstractmethod, abstractproperty\n from typing import List, Optional, Union\n \n+import yaml\n+\n from .logging import get_logger\n from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n \n@@ -142,7 +145,8 @@ def tracker(self):\n \n def store_init_configuration(self, values: dict):\n \"\"\"\n- Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. 
Stores the\n+ hyperparameters in a yaml file for future use.\n \n Args:\n values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n@@ -151,7 +155,16 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ project_run_name = time.time()\n+ dir_name = os.path.join(self.logging_dir, str(project_run_name))\n+ os.makedirs(dir_name, exist_ok=True)\n+ with open(os.path.join(dir_name, \"hparams.yml\"), \"w\") as outfile:\n+ try:\n+ yaml.dump(values, outfile)\n+ except yaml.representer.RepresenterError:\n+ logger.error(\"Serialization to store hyperparameters failed\")\n+ raise\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard and hparams yaml file\")\n \n def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n", "code_comments": [ { "body": "Perhaps it may be good to make use of the `logging_dir` to send the yaml to? Or should these be stored separately normally.\r\n\r\n(This part):\r\n```python\r\nwriter = tensorboard.SummaryWriter(self.logging_dir, **kwargs)\r\n```\r\n", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"hparams.yaml\", \"w\") as outfile:", "from_author": false }, { "body": "So, should I save the yaml file in the `logging_dir`?", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"hparams.yaml\", \"w\") as outfile:", "from_author": true }, { "body": "Yep, as this is where users would expect it to wind up landing. E.g\r\n\r\nlogging_dir/hparams.yaml", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"hparams.yaml\", \"w\") as outfile:", "from_author": false }, { "body": "Yeah makes sense. I have made the changes now.", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"hparams.yaml\", \"w\") as outfile:", "from_author": true }, { "body": "Would we want to use pathlib `Path(self.logging_dir) / 'hparams.yaml'` or `os.path.join(self.logging_dir, 'hparams.yaml')` instead here for sanity purposes?", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:", "from_author": false }, { "body": "Do we need to check if all keys/vals are serializable here, or are we going to let the error yaml throws be the one we present to users in this case? 
(not sure if this is done elsewhere in the codebase)", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\n+ yaml.dump(values, outfile)", "from_author": false }, { "body": "We do that somewhat here: https://github.com/huggingface/accelerate/blob/main/src/accelerate/tracking.py#L170-L176\r\n\r\nMaybe a try/catch that alerts the user that non-serializable bits were tried to be stored? Not sure what the try/catch error would be for that, otherwise I'm open to it just throwing the yaml error if we cannot :) ", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\n+ yaml.dump(values, outfile)", "from_author": false }, { "body": "could catch a `yaml.representer.RepresenterError` and return the message maybe?", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\n+ yaml.dump(values, outfile)", "from_author": false }, { "body": "Along with more descriptive explanation of what happened under the hood accelerate...otherwise its the same as letting yaml handle it. \r\n\r\nI'm fine with either solution, just wanted to bring it up!", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\n+ yaml.dump(values, outfile)", "from_author": false }, { "body": "We should be careful here that `hparams.yaml` is saved separately for separate experiments. Should make sure that when you run 2 separate runs, you get 2 separate entries in the hparams dashboard of tensorboard. \r\n\r\nIf we save directly to `logging_dir/hparams.yaml` we would be overwriting every time, eh? ", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"hparams.yaml\", \"w\") as outfile:", "from_author": false }, { "body": "I have changed the hardcoded path here.", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:", "from_author": true }, { "body": "I have added the try/catch error. 
", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\n+ yaml.dump(values, outfile)", "from_author": true }, { "body": "I am not sure where to give a descriptive explanation. Should I add it as a comment or should I define it in the PR?", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\n+ yaml.dump(values, outfile)", "from_author": true }, { "body": "The default tensorboard summary writer uses time.time() to create a folder for every run. So, I added the same for hparams.yml file. Hope this is okay.", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:", "from_author": true }, { "body": "We should change this to actually raise the error, otherwise this will silently fail.\r\n\r\n```suggestion\r\n except yaml.representer.RepresenterError:\r\n logger.error(\"Serialization to store hyperparameters failed\")\r\n raise\r\n```\r\nI think just doing `logger.error` here is fine enough", "diff_hunk": "@@ -151,7 +154,13 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ project_run_name = time.time()\n+ with open(os.path.join(self.logging_dir, project_run_name, \"hparams.yml\"), \"w\") as outfile:\n+ try:\n+ yaml.dump(values, outfile)\n+ except yaml.representer.RepresenterError:\n+ logger.info(\"Serialization to store hyperparmeters failed\")", "from_author": false }, { "body": "Doing so in the `logger.error` (see my suggestions) is good enough :) ", "diff_hunk": "@@ -151,7 +153,9 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ with open(\"self.logging_dir/hparams.yaml\", \"w\") as outfile:\n+ yaml.dump(values, outfile)", "from_author": false }, { "body": "Yes, I have made the changes.", "diff_hunk": "@@ -151,7 +154,13 @@ def store_init_configuration(self, values: dict):\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+ project_run_name = time.time()\n+ with open(os.path.join(self.logging_dir, project_run_name, \"hparams.yml\"), \"w\") as outfile:\n+ try:\n+ yaml.dump(values, outfile)\n+ except yaml.representer.RepresenterError:\n+ logger.info(\"Serialization to store hyperparmeters failed\")", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "cc @nateraw if you have any comments πŸ€— ", "from_author": false }, { "body": "@Shreyz-max cast `run_name` to a string on the yaml part 
and it should solve the error. E.g. `str(run_name)`\r\n\r\nGreat work on this, were almost there!", "from_author": false }, { "body": "Yeah, sorry about the mistake. I did not notice this at all.", "from_author": true }, { "body": "I suppose I need to create the folder.", "from_author": true }, { "body": "Great job! CC @nateraw for one last look, but lg2m!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/657", "pr_id": 1035923283 }, { "diff": "diff --git a/docs/source/concept_guides/gradient_synchronization.mdx b/docs/source/concept_guides/gradient_synchronization.mdx\nindex 02b5adf0d..ea4de3d72 100644\n--- a/docs/source/concept_guides/gradient_synchronization.mdx\n+++ b/docs/source/concept_guides/gradient_synchronization.mdx\n@@ -114,4 +114,6 @@ for batch in dataloader:\n outputs = model(inputs)\n loss = loss_function(outputs, targets)\n accelerator.backward(loss)\n-```\n\\ No newline at end of file\n+```\n+\n+As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice. \n\\ No newline at end of file\ndiff --git a/docs/source/usage_guides/gradient_accumulation.mdx b/docs/source/usage_guides/gradient_accumulation.mdx\nindex bd06efed9..798e15847 100644\n--- a/docs/source/usage_guides/gradient_accumulation.mdx\n+++ b/docs/source/usage_guides/gradient_accumulation.mdx\n@@ -126,3 +126,5 @@ for batch in training_dataloader:\n scheduler.step()\n optimizer.zero_grad()\n ```\n+\n+To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](/concept_guides/gradient_synchronization)\n\\ No newline at end of file\n", "code_comments": [ { "body": "```suggestion\r\nAs a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice. \r\n```", "diff_hunk": "@@ -114,4 +114,6 @@ for batch in dataloader:\n outputs = model(inputs)\n loss = loss_function(outputs, targets)\n accelerator.backward(loss)\n-```\n\\ No newline at end of file\n+```\n+\n+As a result, you should either use *one or the other* when it comes to API choice. ", "from_author": false } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_656). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/656", "pr_id": 1035817822 }, { "diff": "diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 028e431e1..5f189c326 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -134,8 +134,8 @@ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]]\n self.run_name = run_name\n self.logging_dir = os.path.join(logging_dir, run_name)\n self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)\n- logger.info(f\"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}\")\n- logger.info(\n+ logger.debug(f\"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}\")\n+ logger.debug(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n \n@@ -164,7 +164,7 @@ def store_init_configuration(self, values: dict):\n except yaml.representer.RepresenterError:\n logger.error(\"Serialization to store hyperparameters failed\")\n raise\n- logger.info(\"Stored initial configuration hyperparameters to TensorBoard and hparams yaml file\")\n+ logger.debug(\"Stored initial configuration hyperparameters to TensorBoard and hparams yaml file\")\n \n def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n@@ -188,14 +188,14 @@ def log(self, values: dict, step: Optional[int] = None, **kwargs):\n elif isinstance(v, dict):\n self.writer.add_scalars(k, v, global_step=step, **kwargs)\n self.writer.flush()\n- logger.info(\"Successfully logged to TensorBoard\")\n+ logger.debug(\"Successfully logged to TensorBoard\")\n \n def finish(self):\n \"\"\"\n Closes `TensorBoard` writer\n \"\"\"\n self.writer.close()\n- logger.info(\"TensorBoard writer closed\")\n+ logger.debug(\"TensorBoard writer closed\")\n \n \n class WandBTracker(GeneralTracker):\n@@ -215,8 +215,8 @@ class WandBTracker(GeneralTracker):\n def __init__(self, run_name: str, **kwargs):\n self.run_name = run_name\n self.run = wandb.init(project=self.run_name, **kwargs)\n- logger.info(f\"Initialized WandB project {self.run_name}\")\n- logger.info(\n+ logger.debug(f\"Initialized WandB project {self.run_name}\")\n+ logger.debug(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n \n@@ -234,7 +234,7 @@ def store_init_configuration(self, values: dict):\n `str`, `float`, `int`, or `None`.\n \"\"\"\n wandb.config.update(values)\n- logger.info(\"Stored initial configuration hyperparameters to WandB\")\n+ logger.debug(\"Stored initial configuration hyperparameters to WandB\")\n \n def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n@@ -250,14 +250,14 @@ def log(self, values: dict, step: Optional[int] = None, **kwargs):\n Additional key word arguments passed along to the `wandb.log` method.\n \"\"\"\n self.run.log(values, step=step, **kwargs)\n- logger.info(\"Successfully logged to WandB\")\n+ logger.debug(\"Successfully logged to WandB\")\n \n def finish(self):\n \"\"\"\n Closes `wandb` writer\n \"\"\"\n self.run.finish()\n- logger.info(\"WandB run closed\")\n+ logger.debug(\"WandB run closed\")\n \n \n class CometMLTracker(GeneralTracker):\n@@ -279,8 +279,8 @@ class CometMLTracker(GeneralTracker):\n def __init__(self, run_name: str, **kwargs):\n self.run_name = run_name\n self.writer = Experiment(project_name=run_name, **kwargs)\n- 
logger.info(f\"Initialized CometML project {self.run_name}\")\n- logger.info(\n+ logger.debug(f\"Initialized CometML project {self.run_name}\")\n+ logger.debug(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n \n@@ -298,7 +298,7 @@ def store_init_configuration(self, values: dict):\n `str`, `float`, `int`, or `None`.\n \"\"\"\n self.writer.log_parameters(values)\n- logger.info(\"Stored initial configuration hyperparameters to CometML\")\n+ logger.debug(\"Stored initial configuration hyperparameters to CometML\")\n \n def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n@@ -323,14 +323,14 @@ def log(self, values: dict, step: Optional[int] = None, **kwargs):\n self.writer.log_other(k, v, **kwargs)\n elif isinstance(v, dict):\n self.writer.log_metrics(v, step=step, **kwargs)\n- logger.info(\"Successfully logged to CometML\")\n+ logger.debug(\"Successfully logged to CometML\")\n \n def finish(self):\n \"\"\"\n Closes `comet-ml` writer\n \"\"\"\n self.writer.end()\n- logger.info(\"CometML run closed\")\n+ logger.debug(\"CometML run closed\")\n \n \n LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n@@ -384,6 +384,6 @@ def filter_trackers(\n )\n loggers.append(log_type)\n else:\n- logger.info(f\"Tried adding logger {log_type}, but package is unavailable in the system.\")\n+ logger.debug(f\"Tried adding logger {log_type}, but package is unavailable in the system.\")\n \n return loggers\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/655", "pr_id": 1035655626 }, { "diff": "diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 839a7a2a9..08b988250 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -20,7 +20,7 @@\n from typing import List, Optional, Union\n \n from .logging import get_logger\n-from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+from .utils import LoggerType, is_aim_available, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n \n \n _available_trackers = []\n@@ -40,6 +40,11 @@\n \n _available_trackers.append(LoggerType.COMETML)\n \n+if is_aim_available():\n+ from aim import Run\n+\n+ _available_trackers.append(LoggerType.AIM)\n+\n \n logger = get_logger(__name__)\n \n@@ -320,7 +325,72 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. 
Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(repo=logging_dir, **kwargs)\n+ self.writer.name = self.run_name\n+ logger.debug(f\"Initialized Aim project {self.run_name}\")\n+ logger.debug(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs.\n+ \"\"\"\n+ self.writer[\"hparams\"] = values\n+\n+ def log(self, values: dict, step: Optional[int], **kwargs):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.track` method.\n+ \"\"\"\n+ # Note: replace this with the dictionary support when merged\n+ for key, value in values.items():\n+ self.writer.track(value, name=key, step=step, **kwargs)\n+\n+ def finish(self):\n+ \"\"\"\n+ Closes `aim` writer\n+ \"\"\"\n+ self.writer.close()\n+\n+\n+LOGGER_TYPE_TO_CLASS = {\n+ \"aim\": AimTracker,\n+ \"comet_ml\": CometMLTracker,\n+ \"tensorboard\": TensorBoardTracker,\n+ \"wandb\": WandBTracker,\n+}\n \n \n def filter_trackers(\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 2b8f566f0..ae5af1c3f 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -20,6 +20,7 @@\n )\n from .imports import (\n get_ccl_version,\n+ is_aim_available,\n is_apex_available,\n is_bf16_available,\n is_boto3_available,\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 5f425f200..9f6e30bec 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -196,6 +196,7 @@ class LoggerType(BaseEnum):\n \"\"\"\n \n ALL = \"all\"\n+ AIM = \"aim\"\n TENSORBOARD = \"tensorboard\"\n WANDB = \"wandb\"\n COMETML = \"comet_ml\"\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex 074d02e4a..6239e3deb 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -93,6 +93,10 @@ def is_datasets_available():\n return importlib.util.find_spec(\"datasets\") is not None\n \n \n+def is_aim_available():\n+ return importlib.util.find_spec(\"aim\") is not None\n+\n+\n def is_tensorboard_available():\n return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\n \n", "code_comments": [ { "body": "@muellerzr the `finalize` method is more for the SDK internal use (it handles storage indexing), which will become a private method in the upcoming releases.\r\nThe resources cleanup is done in `close` method, which should be used to properly finalize the run:\r\n```py\r\nself.writer.close()\r\n```", "diff_hunk": "@@ -320,7 +325,74 @@ def 
finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(repo=logging_dir, experiment=run_name, **kwargs)\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs.\n+ \"\"\"\n+ self.writer[\"hparams\"].update(values)\n+\n+ def log(self, values: dict, step: Optional[int], **kwargs):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ kwargs (`dict`, *optional*):\n+ Additional key word arguments passed along to the `Run.track` method. Valid keys include `context` and\n+ `epoch`.\n+ \"\"\"\n+ # Note: replace this with the dictionary support when merged\n+ context = kwargs.pop(\"context\", {})\n+ epoch = kwargs.pop(\"epoch\", None)\n+ for key, value in values.items():\n+ self.writer.track(value, key, step=step, epoch=epoch, context=context)\n+\n+ def finish(self):\n+ \"\"\"\n+ Closes `aim` writer\n+ \"\"\"\n+ self.writer.finalize()", "from_author": false }, { "body": "What about passing the name argument as a keyword argument? I guess it will be more readable and will minimize the risk of forward incompatibilities. Thoughts? πŸ€” ", "diff_hunk": "@@ -320,7 +325,74 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(repo=logging_dir, experiment=run_name, **kwargs)\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs.\n+ \"\"\"\n+ self.writer[\"hparams\"].update(values)\n+\n+ def log(self, values: dict, step: Optional[int], **kwargs):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ kwargs (`dict`, *optional*):\n+ Additional key word arguments passed along to the `Run.track` method. Valid keys include `context` and\n+ `epoch`.\n+ \"\"\"\n+ # Note: replace this with the dictionary support when merged\n+ context = kwargs.pop(\"context\", {})\n+ epoch = kwargs.pop(\"epoch\", None)\n+ for key, value in values.items():\n+ self.writer.track(value, key, step=step, epoch=epoch, context=context)", "from_author": false }, { "body": "The experiment is mainly used to group related runs together (e.g. often used in sweeps, hparams search). The experiment is not a mandatory argument, it defaults to `\"default\"`.\r\n\r\nI would recommend the `init` method to receive the repo path and the run name and pass the rest of arguments as kwargs to the Aim Run:\r\n```py\r\ndef __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\r\n ...\r\n\r\n # Init the run - specify the logs dir and pass kwargs (including experiment) \r\n self.writer = Run(repo=logging_dir, **kwargs)\r\n\r\n # Assign the run name\r\n self.writer.name = run_name\r\n```\r\n\r\n", "diff_hunk": "@@ -320,7 +325,74 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):", "from_author": false }, { "body": "If you're worried about that, we can absolutely do so. Easy to achieve!", "diff_hunk": "@@ -320,7 +325,74 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(repo=logging_dir, experiment=run_name, **kwargs)\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs.\n+ \"\"\"\n+ self.writer[\"hparams\"].update(values)\n+\n+ def log(self, values: dict, step: Optional[int], **kwargs):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ kwargs (`dict`, *optional*):\n+ Additional key word arguments passed along to the `Run.track` method. Valid keys include `context` and\n+ `epoch`.\n+ \"\"\"\n+ # Note: replace this with the dictionary support when merged\n+ context = kwargs.pop(\"context\", {})\n+ epoch = kwargs.pop(\"epoch\", None)\n+ for key, value in values.items():\n+ self.writer.track(value, key, step=step, epoch=epoch, context=context)", "from_author": true }, { "body": "Interesting. I've been doing it the opposite way in my experiments. So e.g. if I have 3 trials of X hyperparameter, I'd just name it `run_hparam_x` *not* `run_hyparam_x_1`?", "diff_hunk": "@@ -320,7 +325,74 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):", "from_author": true }, { "body": "@gorarakelyan I'd recommend updating this example, as this is where I got it from :) Will use close\r\n\r\nhttps://colab.research.google.com/drive/14rIAjpEyklf5fSMiRbyZs6iYG7IVibcI?usp=sharing#scrollTo=qOu7icths6n5", "diff_hunk": "@@ -320,7 +325,74 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(repo=logging_dir, experiment=run_name, **kwargs)\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs.\n+ \"\"\"\n+ self.writer[\"hparams\"].update(values)\n+\n+ def log(self, values: dict, step: Optional[int], **kwargs):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ kwargs (`dict`, *optional*):\n+ Additional key word arguments passed along to the `Run.track` method. Valid keys include `context` and\n+ `epoch`.\n+ \"\"\"\n+ # Note: replace this with the dictionary support when merged\n+ context = kwargs.pop(\"context\", {})\n+ epoch = kwargs.pop(\"epoch\", None)\n+ for key, value in values.items():\n+ self.writer.track(value, key, step=step, epoch=epoch, context=context)\n+\n+ def finish(self):\n+ \"\"\"\n+ Closes `aim` writer\n+ \"\"\"\n+ self.writer.finalize()", "from_author": true }, { "body": "@muellerzr Yup, makes sense. What about assigning trial hashes (or ids) to run names and the sweep name to experiment? Like this:\r\n\r\n| Run name | Experiment |\r\n| --- | --- |\r\n| trial_1 | run_hparam_x |\r\n| trial_2 | run_hparam_x |\r\n| trial_3 | run_hparam_x |\r\n| --- | --- |\r\n| trial_1 | run_hparam_y |\r\n| trial_2 | run_hparam_y |\r\n| trial_3 | run_hparam_y |", "diff_hunk": "@@ -320,7 +325,74 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):", "from_author": false }, { "body": "Sounds good! I'll switch it to this as that makes perfect sense. ", "diff_hunk": "@@ -320,7 +325,74 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):", "from_author": true }, { "body": "@muellerzr The last minor thing - the `run_hash` is a unique auto-generated id of a run. It would be better to store the name as a `run.name`. Like this:\r\n```py\r\nself.writer = Run(repo=logging_dir, **kwargs)\r\nself.writer.name = run_name\r\n```", "diff_hunk": "@@ -320,7 +325,71 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. 
Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(run_hash=run_name, repo=logging_dir, **kwargs)", "from_author": false }, { "body": "Fixed, thanks!", "diff_hunk": "@@ -320,7 +325,71 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(run_hash=run_name, repo=logging_dir, **kwargs)", "from_author": true }, { "body": "Found one issue during testing. As Aim Run doesn't have an `update` method, let's just use a simple assignment here:\r\n```py\r\nself.writer[\"hparams\"] = values\r\n```", "diff_hunk": "@@ -320,7 +325,72 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(repo=logging_dir, **kwargs)\n+ self.writer.name = self.run_name\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs.\n+ \"\"\"\n+ self.writer[\"hparams\"].update(values)", "from_author": false }, { "body": "Makes sense, will update with this. Thanks for checking!", "diff_hunk": "@@ -320,7 +325,72 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. 
Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(repo=logging_dir, **kwargs)\n+ self.writer.name = self.run_name\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs.\n+ \"\"\"\n+ self.writer[\"hparams\"].update(values)", "from_author": true }, { "body": "Now that #655 was merged, this should be adapted.", "diff_hunk": "@@ -320,7 +325,72 @@ def finish(self):\n logger.info(\"CometML run closed\")\n \n \n-LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+class AimTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `aim`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Run.__init__` method.\n+ \"\"\"\n+\n+ name = \"aim\"\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n+ self.run_name = run_name\n+ self.writer = Run(repo=logging_dir, **kwargs)\n+ self.writer.name = self.run_name\n+ logger.info(f\"Initialized Aim project {self.run_name}\")\n+ logger.info(", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@muellerzr the tracker looks great. Is there any example training script, which I can run to test out the integration? Thanks a million!", "from_author": false }, { "body": "@gorarakelyan you can follow the directions here for installing the git version of accelerate + configuring it for your system:\r\nhttps://huggingface.co/docs/accelerate/basic_tutorials/install\r\n\r\nAnd then feel free to run the `nlp_example.py` script found in the examples directory here: https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py\r\n\r\nJust make sure to also install `transformers` and `datasets` first :) ", "from_author": true }, { "body": "@muellerzr thanks, will test it out soon! πŸ€— ", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/649", "pr_id": 1033234059 }, { "diff": "diff --git a/README.md b/README.md\nindex c67d5dd50..f15f442a7 100644\n--- a/README.md\n+++ b/README.md\n@@ -196,7 +196,7 @@ from accelerate import notebook_launcher\n notebook_launcher(training_function)\n ```\n \n-An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb). 
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb)\n+An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)\n \n ## Why should I use πŸ€— Accelerate?\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/645", "pr_id": 1031721951 }, { "diff": "diff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\nindex fd8a8fa82..89234a911 100755\n--- a/examples/by_feature/deepspeed_with_config_support.py\n+++ b/examples/by_feature/deepspeed_with_config_support.py\n@@ -588,14 +588,12 @@ def group_texts(examples):\n checkpointing_steps = None\n \n # We need to initialize the trackers we use, and also store our configuration.\n- # We initialize the trackers only on main process because `accelerator.log`\n- # only logs on main process and we don't want empty logs/runs on other processes.\n+ # The trackers initializes automatically on the main process.\n if args.with_tracking:\n- if accelerator.is_main_process:\n- experiment_config = vars(args)\n- # TensorBoard cannot log Enums, need the raw value\n- experiment_config[\"lr_scheduler_type\"] = experiment_config[\"lr_scheduler_type\"].value\n- accelerator.init_trackers(\"clm_no_trainer\", experiment_config)\n+ experiment_config = vars(args)\n+ # TensorBoard cannot log Enums, need the raw value\n+ experiment_config[\"lr_scheduler_type\"] = experiment_config[\"lr_scheduler_type\"].value\n+ accelerator.init_trackers(\"clm_no_trainer\", experiment_config)\n \n # Train!\n total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 274ccdfce..4ae8e917e 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -113,7 +113,7 @@ def training_function(config, args):\n batch_size = int(config[\"batch_size\"])\n \n # We need to initialize the trackers we use, and also store our configuration\n- if args.with_tracking and accelerator.is_main_process:\n+ if args.with_tracking:\n experiment_config = vars(args)\n accelerator.init_trackers(\"fsdp_glue_no_trainer\", experiment_config)\n \ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 76ad7a64b..e4467697c 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -166,8 +166,8 @@ def training_function(config, args):\n )\n \n # New Code #\n- # We need to initalize the trackers we use. Overall configurations can also be stored\n- if args.with_tracking and accelerator.is_main_process:\n+ # We need to initialize the trackers we use. 
Overall configurations can also be stored\n+ if args.with_tracking:\n run = os.path.split(__file__)[-1].split(\".\")[0]\n accelerator.init_trackers(run, config)\n \ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 1b86e66ee..880981594 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -103,7 +103,7 @@ def training_function(config, args):\n checkpointing_steps = None\n \n # We need to initialize the trackers we use, and also store our configuration\n- if args.with_tracking and accelerator.is_main_process:\n+ if args.with_tracking:\n run = os.path.split(__file__)[-1].split(\".\")[0]\n accelerator.init_trackers(run, config)\n \ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 93e6764f6..559a5c963 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -75,7 +75,7 @@ def training_function(config, args):\n batch_size = int(config[\"batch_size\"])\n \n # We need to initialize the trackers we use, and also store our configuration\n- if args.with_tracking and accelerator.is_main_process:\n+ if args.with_tracking:\n run = os.path.split(__file__)[-1].split(\".\")[0]\n accelerator.init_trackers(run, config)\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks! Do you want to try to update the complete examples in this too? (The last spot would be in the docs, but I can update that :) )", "from_author": false }, { "body": "Sure, I will update all the example scripts in the `examples` folder in this PR :)", "from_author": true }, { "body": "@Gladiator07 great! It should just be the complete_nlp_example and complete_cv_example", "from_author": false }, { "body": "@muellerzr but many of the example scripts have the `is_main_process` check (for instance [here](https://github.com/huggingface/accelerate/blob/a3d94916a87d07471865261d015128870f513c38/examples/by_feature/deepspeed_with_config_support.py#L594)). Should I update them as well or just `complete_nlp_example` and `complete_cv_example` ?", "from_author": true }, { "body": "Ah, great point! Yes in that case those would be great to have done as well! :)", "from_author": false }, { "body": "Hi @muellerzr, I have removed the `is_main_process` check from all example scripts. Please check it once.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/643", "pr_id": 1031255408 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 8261524fd..4991ddbe4 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -1032,6 +1032,7 @@ def wait_for_everyone(self):\n \"\"\"\n wait_for_everyone()\n \n+ @on_main_process\n def init_trackers(self, project_name: str, config: Optional[dict] = None, init_kwargs: Optional[dict] = {}):\n \"\"\"\n Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@Gladiator07 could you adjust the following code in the examples as well? Since this removes the need for the check:\r\n\r\nhttps://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py#L168-L172", "from_author": false }, { "body": "@muellerzr adjust as in? 
Should I remove the `accelerator.is_main_process` check?", "from_author": true }, { "body": "@Gladiator07 correct. We're putting this in place so that is no longer needed, so the examples should show this :)", "from_author": false }, { "body": "Cool, doing this in a separate PR.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/642", "pr_id": 1031120240 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 806f0094d..3fec513d2 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -11,6 +11,10 @@ jobs:\n runs-on: ubuntu-latest\n strategy:\n matrix:\n+ pytorch-version: [\n+ latest,\n+ minimum\n+ ]\n test-kind: [\n test_prod,\n test_core,\n@@ -43,6 +47,7 @@ jobs:\n if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi\n if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi\n if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi\n+ if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torch==1.6.0; fi\n \n - name: Run Tests\n run: |\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 353ccf894..27db19b9b 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -279,9 +279,7 @@ def __init__(\n self.native_amp = False\n err = \"{mode} mixed precision requires {requirement}\"\n if self.state.mixed_precision == \"fp16\":\n- self.native_amp = is_torch_version(\">=\", \"1.6\")\n- if not self.native_amp:\n- raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\n+ self.native_amp = True\n if not torch.cuda.is_available() and not parse_flag_from_env(\"USE_MPS_DEVICE\"):\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n@@ -314,7 +312,7 @@ def __init__(\n # RNG Types\n self.rng_types = rng_types\n if self.rng_types is None:\n- self.rng_types = [\"torch\"] if is_torch_version(\"<=\", \"1.5.1\") else [\"generator\"]\n+ self.rng_types = [\"generator\"]\n \n @property\n def use_distributed(self):\ndiff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 311111ad4..6643fa579 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -29,6 +29,7 @@\n load_checkpoint_in_model,\n offload_state_dict,\n )\n+from .utils.versions import is_torch_version\n \n \n @contextmanager\n@@ -59,6 +60,8 @@ def init_empty_weights(include_buffers: bool = False):\n \n </Tip>\n \"\"\"\n+ if not is_torch_version(\">=\", \"1.9.0\"):\n+ raise NotImplementedError(\"Initializing empty weights to a meta device requires torch >= 1.9.0\")\n old_register_parameter = nn.Module.register_parameter\n if include_buffers:\n old_register_buffer = nn.Module.register_buffer\n@@ -114,6 +117,8 @@ def cpu_offload(\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n+ if not is_torch_version(\">=\", \"1.9.0\"):\n+ raise NotImplementedError(\"CPU offloading requires torch >= 1.9.0\")\n if execution_device is None:\n execution_device = next(iter(model.parameters())).device\n if state_dict is None:\n@@ -157,6 +162,8 @@ def disk_offload(\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n 
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n+ if not is_torch_version(\">=\", \"1.9.0\"):\n+ raise NotImplementedError(\"Disk offloading requires torch >= 1.9.0\")\n if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n offload_state_dict(offload_dir, model.state_dict())\n if execution_device is None:\n@@ -208,6 +215,8 @@ def dispatch_model(\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n+ if not is_torch_version(\">=\", \"1.9.0\"):\n+ raise NotImplementedError(\"Model dispatching requires torch >= 1.9.0\")\n # Error early if the device map is incomplete.\n check_device_map(model, device_map)\n \n@@ -304,6 +313,8 @@ def load_checkpoint_and_dispatch(\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n+ if not is_torch_version(\">=\", \"1.9.0\"):\n+ raise NotImplementedError(\"Loading and dispatching requires torch >= 1.9.0\")\n if isinstance(device_map, str) and device_map not in [\"auto\", \"balanced\", \"balanced_low_0\", \"sequential\"]:\n raise ValueError(\n \"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or \"\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex b8152b302..9236ee310 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -75,11 +75,11 @@ def total_dataset_length(self):\n \"timeout\": 0,\n \"worker_init_fn\": None,\n \"multiprocessing_context\": None,\n+ \"generator\": None,\n }\n \n # kwargs added after by version\n _PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {\n- \"1.6.0\": {\"generator\": None},\n \"1.7.0\": {\"prefetch_factor\": 2, \"persistent_workers\": False},\n }\n \n@@ -412,7 +412,7 @@ def __init__(self, dataset, split_batches: bool = False, _drop_last: bool = Fals\n self.split_batches = split_batches\n if is_torch_version(\"<\", \"1.8.0\"):\n raise ImportError(\n- \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\n+ f\"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. 
You have {torch.__version__}.\"\n )\n if shuffle:\n torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex 8ddc9af34..dcdead956 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -20,7 +20,7 @@\n import torch\n \n from .state import AcceleratorState\n-from .utils import PrecisionType, PrepareForLaunch, is_torch_version, patch_environment\n+from .utils import PrecisionType, PrepareForLaunch, patch_environment\n \n \n def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\n@@ -90,12 +90,6 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n \n if num_processes > 1:\n # Multi-GPU launch\n- if is_torch_version(\"<\", \"1.5.0\"):\n- raise ImportError(\n- \"Using `notebook_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\n- f\"{torch.__version__}.\"\n- )\n-\n from torch.multiprocessing import start_processes\n \n if len(AcceleratorState._shared_state) > 0:\n@@ -154,12 +148,6 @@ def debug_launcher(function, args=(), num_processes=2):\n num_processes (`int`, *optional*, defaults to 2):\n The number of processes to use for training.\n \"\"\"\n- if is_torch_version(\"<\", \"1.5.0\"):\n- raise ImportError(\n- \"Using `debug_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\n- f\"{torch.__version__}.\"\n- )\n-\n from torch.multiprocessing import start_processes\n \n with tempfile.NamedTemporaryFile() as tmp_file:\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex 43d5ed7ef..faf8a7da8 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -10,6 +10,7 @@\n require_huggingface_suite,\n require_multi_gpu,\n require_single_gpu,\n+ require_torch_min_version,\n require_tpu,\n skip,\n slow,\ndiff --git a/src/accelerate/test_utils/scripts/test_script.py b/src/accelerate/test_utils/scripts/test_script.py\nindex 07d794b42..6897d9084 100644\n--- a/src/accelerate/test_utils/scripts/test_script.py\n+++ b/src/accelerate/test_utils/scripts/test_script.py\n@@ -46,10 +46,9 @@ def rng_sync_check():\n if state.distributed_type == DistributedType.MULTI_GPU:\n synchronize_rng_states([\"cuda\"])\n assert are_the_same_tensors(torch.cuda.get_rng_state()), \"RNG states improperly synchronized on GPU.\"\n- if is_torch_version(\">=\", \"1.6.0\"):\n- generator = torch.Generator()\n- synchronize_rng_states([\"generator\"], generator=generator)\n- assert are_the_same_tensors(generator.get_state()), \"RNG states improperly synchronized in generator.\"\n+ generator = torch.Generator()\n+ synchronize_rng_states([\"generator\"], generator=generator)\n+ assert are_the_same_tensors(generator.get_state()), \"RNG states improperly synchronized in generator.\"\n \n if state.local_process_index == 0:\n print(\"All rng are properly synched.\")\n@@ -339,7 +338,7 @@ def main():\n if state.local_process_index == 0:\n print(\"\\n**DataLoader integration test**\")\n dl_preparation_check()\n- if state.distributed_type != DistributedType.TPU:\n+ if state.distributed_type != DistributedType.TPU and is_torch_version(\">=\", \"1.8.0\"):\n central_dl_preparation_check()\n \n # Trainings are not exactly the same in DeepSpeed and CPU mode\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 381f92c58..94e13412a 100644\n--- 
a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -20,6 +20,7 @@\n import tempfile\n import unittest\n from distutils.util import strtobool\n+from functools import partial\n from pathlib import Path\n from typing import List, Union\n from unittest import mock\n@@ -132,6 +133,16 @@ def require_fsdp(test_case):\n return unittest.skipUnless(is_torch_version(\">=\", \"1.12.0\"), \"test requires torch version >= 1.12.0\")(test_case)\n \n \n+def require_torch_min_version(test_case=None, version=None):\n+ \"\"\"\n+ Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an\n+ installed torch version is less than the required one.\n+ \"\"\"\n+ if test_case is None:\n+ return partial(require_torch_min_version, version=version)\n+ return unittest.skipUnless(is_torch_version(\">=\", version), f\"test requires torch version >= {version}\")(test_case)\n+\n+\n def require_tensorboard(test_case):\n \"\"\"\n Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't\ndiff --git a/tests/test_big_modeling.py b/tests/test_big_modeling.py\nindex 4f3054b73..4f738f4d8 100644\n--- a/tests/test_big_modeling.py\n+++ b/tests/test_big_modeling.py\n@@ -27,7 +27,7 @@\n load_checkpoint_and_dispatch,\n )\n from accelerate.hooks import remove_hook_from_submodules\n-from accelerate.test_utils import require_cuda, require_multi_gpu, slow\n+from accelerate.test_utils import require_cuda, require_multi_gpu, require_torch_min_version, slow\n from accelerate.utils import offload_state_dict\n from transformers import AutoModelForCausalLM, AutoTokenizer\n \n@@ -79,6 +79,7 @@ def forward(self, x):\n return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n \n \n+@require_torch_min_version(version=\"1.9.0\")\n class BigModelingTester(unittest.TestCase):\n def test_init_empty_weights(self):\n # base use\ndiff --git a/tests/test_hooks.py b/tests/test_hooks.py\nindex 2e0519668..9d48db9e1 100644\n--- a/tests/test_hooks.py\n+++ b/tests/test_hooks.py\n@@ -27,7 +27,7 @@\n remove_hook_from_module,\n remove_hook_from_submodules,\n )\n-from accelerate.test_utils import require_multi_gpu\n+from accelerate.test_utils import require_multi_gpu, require_torch_min_version\n \n \n class ModelForTest(nn.Module):\n@@ -51,6 +51,7 @@ def post_forward(self, module, output):\n return output + 1\n \n \n+@require_torch_min_version(version=\"1.9.0\")\n class HooksModelTester(unittest.TestCase):\n def test_add_and_remove_hooks(self):\n test_model = ModelForTest()\ndiff --git a/tests/test_metrics.py b/tests/test_metrics.py\nindex b26ef00b2..9dd098456 100644\n--- a/tests/test_metrics.py\n+++ b/tests/test_metrics.py\n@@ -26,11 +26,13 @@\n require_huggingface_suite,\n require_multi_gpu,\n require_single_gpu,\n+ require_torch_min_version,\n )\n from accelerate.utils import get_launch_prefix, patch_environment\n \n \n @require_huggingface_suite\n+@require_torch_min_version(version=\"1.8.0\")\n class MetricTester(unittest.TestCase):\n def setUp(self):\n mod_file = inspect.getfile(accelerate.test_utils)\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\nindex f5c36ee3d..1c6f6088d 100644\n--- a/tests/test_modeling_utils.py\n+++ b/tests/test_modeling_utils.py\n@@ -21,6 +21,7 @@\n import torch.nn as nn\n \n from accelerate.test_utils import require_cuda, require_multi_gpu\n+from accelerate.test_utils.testing import require_torch_min_version\n from accelerate.utils.modeling import 
(\n check_device_map,\n clean_device_map,\n@@ -45,6 +46,7 @@ def forward(self, x):\n return self.linear2(self.batchnorm(self.linear1(x)))\n \n \n+@require_torch_min_version(version=\"1.9.0\")\n class ModelingUtilsTester(unittest.TestCase):\n def check_set_module_tensor_for_device(self, model, device1, device2):\n self.assertEqual(model.linear1.weight.device, torch.device(device1))\n", "code_comments": [ { "body": "It should be possible to import those regardless of the version (and then the actual use of it would trigger the error).", "diff_hunk": "@@ -16,7 +15,18 @@\n InitProcessGroupKwargs,\n find_executable_batch_size,\n infer_auto_device_map,\n+ is_torch_version,\n load_checkpoint_in_model,\n rich,\n synchronize_rng_states,\n )\n+\n+\n+if is_torch_version(\">=\", \"1.9.0\"):\n+ from .big_modeling import (\n+ cpu_offload,\n+ disk_offload,\n+ dispatch_model,\n+ init_empty_weights,\n+ load_checkpoint_and_dispatch,\n+ )", "from_author": false }, { "body": "Let's leave the type hint but as a forward reference?", "diff_hunk": "@@ -48,7 +49,7 @@ def save_accelerator_state(\n optimizers: list,\n schedulers: list,\n process_index: int,\n- scaler: GradScaler = None,\n+ scaler=None,", "from_author": false }, { "body": "```suggestion\r\ndef require_torch_min_version(test_case=None, version=None):\r\n```\r\nNaming isn't completely right IMO.", "diff_hunk": "@@ -132,6 +133,16 @@ def require_fsdp(test_case):\n return unittest.skipUnless(is_torch_version(\">=\", \"1.12.0\"), \"test requires torch version >= 1.12.0\")(test_case)\n \n \n+def require_torch_version(test_case=None, version=None):", "from_author": false }, { "body": "Not needed since we're raising the min torch", "diff_hunk": "@@ -48,7 +49,7 @@ def save_accelerator_state(\n optimizers: list,\n schedulers: list,\n process_index: int,\n- scaler: GradScaler = None,\n+ scaler=None,", "from_author": true }, { "body": "Fixed", "diff_hunk": "@@ -16,7 +15,18 @@\n InitProcessGroupKwargs,\n find_executable_batch_size,\n infer_auto_device_map,\n+ is_torch_version,\n load_checkpoint_in_model,\n rich,\n synchronize_rng_states,\n )\n+\n+\n+if is_torch_version(\">=\", \"1.9.0\"):\n+ from .big_modeling import (\n+ cpu_offload,\n+ disk_offload,\n+ dispatch_model,\n+ init_empty_weights,\n+ load_checkpoint_and_dispatch,\n+ )", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/638", "pr_id": 1026629204 }, { "diff": "diff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex cbbaeae16..5f425f200 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -60,6 +60,8 @@ class DistributedDataParallelKwargs(KwargsHandler):\n \n `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.\n \n+ `static_graph` is only available in PyTorch 1.11.0 and later versions.\n+\n </Tip>\"\"\"\n \n dim: int = 0\n@@ -68,6 +70,7 @@ class DistributedDataParallelKwargs(KwargsHandler):\n find_unused_parameters: bool = False\n check_reduction: bool = False\n gradient_as_bucket_view: bool = False\n+ static_graph: bool = False\n \n \n @dataclass\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks! 
Could we add a warning to this please similar to the other one that exists for these kwargs in the docstring mentioning its only available on pytorch>=1.11?", "from_author": false }, { "body": "Sure, done", "from_author": true }, { "body": "@rom1504 looks like the last thing we need is to run `make style; make quality` :) ", "from_author": false }, { "body": "@muellerzr fixed", "from_author": true }, { "body": "Great! Thanks!!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/637", "pr_id": 1026327065 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 11aadc8e2..fcc80bb7f 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -26,7 +26,6 @@\n from typing import Dict, List\n \n import torch\n-import torch.distributed.run as distrib_run\n \n import psutil\n from accelerate.commands.config import default_config_file, load_config_from_file\n@@ -50,6 +49,10 @@\n from rich.logging import RichHandler\n \n \n+if is_torch_version(\">=\", \"1.9.0\"):\n+ import torch.distributed.run as distrib_run\n+\n+\n FORMAT = \"%(message)s\"\n logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks for fixing!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/634", "pr_id": 1025316276 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 1ba5839e2..65c0e56b2 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -37,6 +37,7 @@ def get_cluster_input():\n num_machines = 1\n main_process_ip = None\n main_process_port = None\n+ rdzv_backend = \"static\"\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\n num_machines = _ask_field(\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n@@ -56,6 +57,9 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n+ rdzv_backend = _ask_field(\n+ \"What rendezvous backend will you use? 
('static', 'c10d', ...)\", default=\"static\"\n+ )\n \n if distributed_type == DistributedType.NO:\n use_cpu = _ask_field(\n@@ -323,4 +327,5 @@ def get_cluster_input():\n deepspeed_config=deepspeed_config,\n fsdp_config=fsdp_config,\n use_cpu=use_cpu,\n+ rdzv_backend=rdzv_backend,\n )\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 11ca0ca90..e78aad181 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -137,6 +137,7 @@ class ClusterConfig(BaseConfig):\n num_machines: int = 1\n main_process_ip: Optional[str] = None\n main_process_port: Optional[int] = None\n+ rdzv_backend: Optional[str] = \"static\"\n main_training_function: str = \"main\"\n \n # args for deepspeed_plugin\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex ea27afb75..11aadc8e2 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -26,13 +26,13 @@\n from typing import Dict, List\n \n import torch\n+import torch.distributed.run as distrib_run\n \n import psutil\n from accelerate.commands.config import default_config_file, load_config_from_file\n from accelerate.commands.config.config_args import SageMakerConfig\n from accelerate.state import get_int_from_env\n from accelerate.utils import (\n- TORCH_LAUNCH_PARAMS,\n ComputeEnvironment,\n DistributedType,\n PrecisionType,\n@@ -50,9 +50,6 @@\n from rich.logging import RichHandler\n \n \n-if is_torch_version(\">=\", \"1.9.0\"):\n- import torch.distributed.run as distrib_run\n-\n FORMAT = \"%(message)s\"\n logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\n \n@@ -379,8 +376,7 @@ def multi_gpu_launcher(args):\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\n setattr(args, \"nnodes\", str(num_machines))\n setattr(args, \"node_rank\", str(args.machine_rank))\n- setattr(args, \"master_addr\", str(args.main_process_ip))\n- setattr(args, \"master_port\", str(args.main_process_port))\n+ setattr(args, \"rdzv_endpoint\", f\"{args.main_process_ip}:{args.main_process_port}\")\n else:\n setattr(args, \"nproc_per_node\", str(num_processes))\n if args.main_process_port is not None:\n@@ -451,33 +447,19 @@ def multi_gpu_launcher(args):\n if args.fsdp_state_dict_type is not None:\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n- if is_torch_version(\">=\", \"1.9.0\"):\n- debug = getattr(args, \"debug\", False)\n- args = _filter_args(args)\n- with patch_environment(**current_env):\n- console = get_console()\n-\n- try:\n- distrib_run.run(args)\n- except:\n- if debug:\n- console.print(\"\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\n- console.print_exception(suppress=[__file__], show_locals=False)\n- else:\n- # We still have to use subprocess, the user won't get a clean traceback as a result\n- cmd = get_launch_prefix()\n- for k, v in vars(args).items():\n- if k in TORCH_LAUNCH_PARAMS and v:\n- param = [f\"--{k}\"]\n- if type(v) != bool:\n- param.append(v)\n- cmd.extend(param)\n- cmd.append(args.training_script)\n- cmd.extend(args.training_script_args)\n- process = subprocess.Popen(cmd, env=current_env)\n- process.wait()\n- if process.returncode != 0:\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ if is_torch_version(\"<\", \"1.9.0\"):\n+ raise NotImplementedError(\"Multi-node 
training requires pytorch>=1.9.0\")\n+\n+ debug = getattr(args, \"debug\", False)\n+ args = _filter_args(args)\n+ with patch_environment(**current_env):\n+ try:\n+ distrib_run.run(args)\n+ except:\n+ if debug:\n+ console = get_console()\n+ console.print(\"\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\n+ console.print_exception(suppress=[__file__], show_locals=False)\n \n \n def deepspeed_launcher(args):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/631", "pr_id": 1025122718 }, { "diff": "diff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 99d109bd1..1b86e66ee 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -105,8 +105,6 @@ def training_function(config, args):\n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking and accelerator.is_main_process:\n run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n accelerator.init_trackers(run, config)\n \n # Grab all the image filenames\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 094f6cfb5..93e6764f6 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -77,8 +77,6 @@ def training_function(config, args):\n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking and accelerator.is_main_process:\n run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n accelerator.init_trackers(run, config)\n \n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 4e0900eb4..051cf8bc6 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -251,7 +251,7 @@ def __repr__(self):\n if self.distributed_type == DistributedType.DEEPSPEED:\n repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\n\"\n else:\n- f\"Mixed precision type: {mixed_precision}\\n\"\n+ repr += f\"Mixed precision type: {mixed_precision}\\n\"\n return repr\n \n # For backward compatibility\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/630", "pr_id": 1024597998 }, { "diff": "diff --git a/docs/source/usage_guides/mps.mdx b/docs/source/usage_guides/mps.mdx\nindex 43a00f104..3648bdce8 100644\n--- a/docs/source/usage_guides/mps.mdx\n+++ b/docs/source/usage_guides/mps.mdx\n@@ -72,8 +72,9 @@ accelerate launch /examples/cv_example.py --data_dir images\n \n ## A few caveats to be aware of\n \n-1. For `nlp_example.py` the metrics are too bad when compared to CPU-only training. \n-This means certain operations in BERT model are going wrong using mps device and this needs to be fixed by PyTorch.\n+1. We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. \n+It has major fixes related to model correctness and performance improvements for transformer based models.\n+Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details.\n 2. 
Distributed setups `gloo` and `nccl` are not working with `mps` device. \n This means that currently only single GPU of `mps` device type can be used.\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/629", "pr_id": 1024594829 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 7cb4529d9..541bda48e 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -367,7 +367,7 @@ def multi_gpu_launcher(args):\n if num_machines > 1:\n setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\n setattr(args, \"nnodes\", str(num_machines))\n- setattr(args, \"machine_rank\", str(args.machine_rank))\n+ setattr(args, \"node_rank\", str(args.machine_rank))\n setattr(args, \"master_addr\", str(args.main_process_ip))\n setattr(args, \"master_port\", str(args.main_process_port))\n else:\n@@ -441,16 +441,16 @@ def multi_gpu_launcher(args):\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n if is_torch_version(\">=\", \"1.9.0\"):\n- distrib_args = _filter_args(args)\n+ args = _filter_args(args)\n with patch_environment(**current_env):\n- distrib_run.run(distrib_args)\n+ distrib_run.run(args)\n else:\n # We still have to use subprocess, the user won't get a clean traceback as a result\n cmd = get_launch_prefix()\n for k, v in vars(args).items():\n if k in TORCH_LAUNCH_PARAMS and v:\n param = [f\"--{k}\"]\n- if not v:\n+ if type(v) != bool:\n param.append(v)\n cmd.extend(param)\n cmd.append(args.training_script)\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nindex c6605ee26..1a452ca63 100644\n--- a/src/accelerate/utils/launch.py\n+++ b/src/accelerate/utils/launch.py\n@@ -46,16 +46,12 @@ def _filter_args(args):\n Filters out all `accelerate` specific args\n \"\"\"\n distrib_args = distrib_run.get_args_parser()\n- known_args, _ = distrib_args.parse_known_args()\n- for arg in list(vars(args).keys()):\n- if arg not in vars(known_args).keys():\n- delattr(args, arg)\n- distrib_args = distrib_run.parse_args(vars(args))\n+ new_args, _ = distrib_args.parse_known_args()\n+\n for key, value in vars(args).items():\n- setattr(distrib_args, key, value)\n- if is_torch_version(\"<\", \"1.9.0\"):\n- setattr(distrib_args, \"use_env\", True)\n- return distrib_args\n+ if key in vars(new_args).keys():\n+ setattr(new_args, key, value)\n+ return new_args\n \n \n class PrepareForLaunch:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/627", "pr_id": 1024080683 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex c7f235843..8196413c5 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -37,7 +37,6 @@\n PrecisionType,\n PrepareForLaunch,\n _filter_args,\n- get_launch_prefix,\n is_deepspeed_available,\n is_rich_available,\n is_sagemaker_available,\n@@ -477,55 +476,58 @@ def multi_gpu_launcher(args):\n def deepspeed_launcher(args):\n if not is_deepspeed_available():\n raise ImportError(\"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\")\n- cmd = 
[\"deepspeed\", \"--no_local_rank\"]\n- if args.num_machines > 1:\n- if args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n- cmd = get_launch_prefix()\n+ num_processes = getattr(args, \"num_processes\")\n+ num_machines = getattr(args, \"num_machines\")\n+ main_process_ip = getattr(args, \"main_process_ip\")\n+ main_process_port = getattr(args, \"main_process_port\")\n+ if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n+ cmd = [\"deepspeed\", \"--no_local_rank\"]\n+ cmd.extend([\"--hostfile\", str(args.deepspeed_hostfile), \"--launcher\", str(args.deepspeed_multinode_launcher)])\n+ if args.deepspeed_exclusion_filter is not None:\n cmd.extend(\n [\n- \"--nproc_per_node\",\n- str(args.num_processes // args.num_machines),\n- \"--nnodes\",\n- str(args.num_machines),\n- \"--node_rank\",\n- str(args.machine_rank),\n- \"--master_addr\",\n- args.main_process_ip,\n- \"--master_port\",\n- str(args.main_process_port),\n+ \"--exclude\",\n+ str(args.deepspeed_exclusion_filter),\n ]\n )\n- else:\n+ elif args.deepspeed_inclusion_filter is not None:\n cmd.extend(\n- [\"--hostfile\", str(args.deepspeed_hostfile), \"--launcher\", str(args.deepspeed_multinode_launcher)]\n+ [\n+ \"--include\",\n+ str(args.deepspeed_inclusion_filter),\n+ ]\n )\n- if args.deepspeed_exclusion_filter is not None:\n- cmd.extend(\n- [\n- \"--exclude\",\n- str(args.deepspeed_exclusion_filter),\n- ]\n- )\n- elif args.deepspeed_inclusion_filter is not None:\n- cmd.extend(\n- [\n- \"--include\",\n- str(args.deepspeed_inclusion_filter),\n- ]\n- )\n- else:\n- cmd.extend([\"--num_gpus\", str(args.num_processes // args.num_machines)])\n+ else:\n+ cmd.extend([\"--num_gpus\", str(args.num_processes // args.num_machines)])\n+\n+ if args.module and args.no_python:\n+ raise ValueError(\"--module and --no_python cannot be used together\")\n+ elif args.module:\n+ cmd.append(\"--module\")\n+ elif args.no_python:\n+ cmd.append(\"--no_python\")\n+ cmd.append(args.training_script)\n+ cmd.extend(args.training_script_args)\n+ elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n+ setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\n+ setattr(args, \"nnodes\", str(num_machines))\n+ setattr(args, \"node_rank\", int(args.machine_rank))\n+ if getattr(args, \"same_network\"):\n+ setattr(args, \"master_addr\", str(main_process_ip))\n+ setattr(args, \"master_port\", str(main_process_port))\n+ else:\n+ setattr(args, \"rdzv_endpoint\", f\"{main_process_ip}:{main_process_port}\")\n else:\n- cmd.extend([\"--num_gpus\", str(args.num_processes)])\n+ setattr(args, \"nproc_per_node\", str(num_processes))\n+ if main_process_port is not None:\n+ setattr(args, \"master_port\", str(main_process_port))\n \n if args.module and args.no_python:\n raise ValueError(\"--module and --no_python cannot be used together\")\n elif args.module:\n- cmd.append(\"--module\")\n+ setattr(args, \"module\", True)\n elif args.no_python:\n- cmd.append(\"--no_python\")\n- cmd.append(args.training_script)\n- cmd.extend(args.training_script_args)\n+ setattr(args, \"no_python\", True)\n \n current_env = os.environ.copy()\n try:\n@@ -558,10 +560,24 @@ def deepspeed_launcher(args):\n continue\n f.write(f\"{key}={value}\\n\")\n \n- process = subprocess.Popen(cmd, env=current_env)\n- process.wait()\n- if process.returncode != 0:\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ process = subprocess.Popen(cmd, env=current_env)\n+ 
process.wait()\n+ if process.returncode != 0:\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ else:\n+ if is_torch_version(\"<\", \"1.9.0\"):\n+ raise NotImplementedError(\"Multi-node training requires pytorch>=1.9.0\")\n+\n+ debug = getattr(args, \"debug\", False)\n+ args = _filter_args(args)\n+ with patch_environment(**current_env):\n+ try:\n+ distrib_run.run(args)\n+ except:\n+ if debug:\n+ console = get_console()\n+ console.print(\"\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\n+ console.print_exception(suppress=[__file__], show_locals=False)\n \n \n def tpu_launcher(args):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "> There is a bit too much in this PR to wrap my head around. Can we split it between multiGPU launcher fixes, DeepSpeed launcher fixes and other fixes? Thanks!\r\n\r\n1. MultiGPU launcher fixes and simplification was put in another PR by Zach #627 \r\n2. Minor other fixes are in #630 \r\n3. This will make deepspeed launcher updates to remove a call to subprocess", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/626", "pr_id": 1023919762 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 7cb4529d9..117932f2b 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -334,6 +334,8 @@ def simple_launcher(args):\n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\n current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\n+ if args.use_mps_device:\n+ current_env[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n if args.num_machines > 1:\n current_env[\"MASTER_ADDR\"] = args.main_process_ip\n current_env[\"MASTER_PORT\"] = str(args.main_process_port)\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex bd339048f..4e0900eb4 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import os\n+import warnings\n from distutils.util import strtobool\n \n import torch\n@@ -221,6 +222,14 @@ def __init__(\n \"and/or you do not have an MPS-enabled device on this machine.\"\n )\n else:\n+ from .utils import is_torch_version\n+\n+ if not is_torch_version(\">\", \"1.12.0\"):\n+ warnings.warn(\n+ \"We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. \"\n+ \"It has major fixes related to model correctness and performance improvements for transformer based models. \"\n+ \"Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details.\"\n+ )\n self.device = torch.device(\"mps\")\n elif cpu or not torch.cuda.is_available():\n self.device = torch.device(\"cpu\")\n", "code_comments": [ { "body": "That warning may not age well once PyTorch 1.13 is released. Maybe:\r\n```suggestion\r\n \"We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. \"\r\n```", "diff_hunk": "@@ -221,6 +222,14 @@ def __init__(\n \"and/or you do not have an MPS-enabled device on this machine.\"\n )\n else:\n+ from .utils import is_torch_version\n+\n+ if not is_torch_version(\">\", \"1.12.0\"):\n+ warnings.warn(\n+ \"We strongly recommend to install the latest PyTorch nightly version (1.13.0.dev...) on your MacOS machine. 
\"", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/625", "pr_id": 1023444774 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 8e372adea..7cb4529d9 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -32,19 +32,26 @@\n from accelerate.commands.config.config_args import SageMakerConfig\n from accelerate.state import get_int_from_env\n from accelerate.utils import (\n+ TORCH_LAUNCH_PARAMS,\n ComputeEnvironment,\n DistributedType,\n PrecisionType,\n PrepareForLaunch,\n+ _filter_args,\n get_launch_prefix,\n is_deepspeed_available,\n is_sagemaker_available,\n+ is_torch_version,\n patch_environment,\n )\n from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\n from accelerate.utils.dataclasses import SageMakerDistributedType\n \n \n+if is_torch_version(\">=\", \"1.9.0\"):\n+ import torch.distributed.run as distrib_run\n+\n+\n logger = logging.getLogger(__name__)\n \n \n@@ -355,43 +362,32 @@ def simple_launcher(args):\n \n \n def multi_gpu_launcher(args):\n- cmd = get_launch_prefix()\n- if args.num_machines > 1:\n- cmd.extend(\n- [\n- \"--nproc_per_node\",\n- str(args.num_processes // args.num_machines),\n- \"--nnodes\",\n- str(args.num_machines),\n- \"--node_rank\",\n- str(args.machine_rank),\n- \"--master_addr\",\n- args.main_process_ip,\n- \"--master_port\",\n- str(args.main_process_port),\n- ]\n- )\n+ num_processes = getattr(args, \"num_processes\")\n+ num_machines = getattr(args, \"num_machines\")\n+ if num_machines > 1:\n+ setattr(args, \"nproc_per_node\", str(num_processes // num_machines))\n+ setattr(args, \"nnodes\", str(num_machines))\n+ setattr(args, \"machine_rank\", str(args.machine_rank))\n+ setattr(args, \"master_addr\", str(args.main_process_ip))\n+ setattr(args, \"master_port\", str(args.main_process_port))\n else:\n- cmd.extend([\"--nproc_per_node\", str(args.num_processes)])\n+ setattr(args, \"nproc_per_node\", str(num_processes))\n if args.main_process_port is not None:\n- cmd.extend([\"--master_port\", str(args.main_process_port)])\n+ setattr(args, \"master_port\", str(args.main_process_port))\n \n if args.module and args.no_python:\n raise ValueError(\"--module and --no_python cannot be used together\")\n elif args.module:\n- cmd.append(\"--module\")\n+ setattr(args, \"module\", True)\n elif args.no_python:\n- cmd.append(\"--no_python\")\n- cmd.append(args.training_script)\n- cmd.extend(args.training_script_args)\n+ setattr(args, \"no_python\", True)\n \n current_env = os.environ.copy()\n+ mixed_precision = args.mixed_precision.lower()\n try:\n- mixed_precision = PrecisionType(args.mixed_precision.lower())\n+ mixed_precision = PrecisionType(mixed_precision)\n except ValueError:\n- raise ValueError(\n- f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n- )\n+ raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.\")\n \n if args.fp16:\n warnings.warn('--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n@@ -444,10 +440,25 @@ def multi_gpu_launcher(args):\n if args.fsdp_state_dict_type is not None:\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n- process = subprocess.Popen(cmd, env=current_env)\n- process.wait()\n- if process.returncode != 0:\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ if is_torch_version(\">=\", \"1.9.0\"):\n+ distrib_args = _filter_args(args)\n+ with patch_environment(**current_env):\n+ distrib_run.run(distrib_args)\n+ else:\n+ # We still have to use subprocess, the user won't get a clean traceback as a result\n+ cmd = get_launch_prefix()\n+ for k, v in vars(args).items():\n+ if k in TORCH_LAUNCH_PARAMS and v:\n+ param = [f\"--{k}\"]\n+ if not v:\n+ param.append(v)\n+ cmd.extend(param)\n+ cmd.append(args.training_script)\n+ cmd.extend(args.training_script_args)\n+ process = subprocess.Popen(cmd, env=current_env)\n+ process.wait()\n+ if process.returncode != 0:\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n \n \n def deepspeed_launcher(args):\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex db21afc3a..2b8f566f0 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -2,7 +2,7 @@\n # There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n # module, but to preserve other warnings. So, don't check this module at all\n \n-from .constants import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME\n+from .constants import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS\n from .dataclasses import (\n ComputeEnvironment,\n DeepSpeedPlugin,\n@@ -91,7 +91,7 @@\n HfDeepSpeedConfig,\n )\n \n-from .launch import PrepareForLaunch, get_launch_prefix\n+from .launch import PrepareForLaunch, _filter_args, get_launch_prefix\n from .memory import find_executable_batch_size\n from .other import (\n extract_model_from_parallel,\ndiff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\nindex 4e7c71853..934923bef 100644\n--- a/src/accelerate/utils/constants.py\n+++ b/src/accelerate/utils/constants.py\n@@ -31,3 +31,30 @@\n DEEPSPEED_MULTINODE_LAUNCHERS = [\"pdsh\", \"standard\", \"openmpi\", \"mvapich\"]\n \n STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\n+\n+# These are the args for `torch.distributed.launch` for pytorch < 1.9\n+TORCH_LAUNCH_PARAMS = [\n+ \"nnodes\",\n+ \"nproc_per_node\",\n+ \"rdzv_backend\",\n+ \"rdzv_endpoint\",\n+ \"rdzv_id\",\n+ \"rdzv_conf\",\n+ \"standalone\",\n+ \"max_restarts\",\n+ \"monitor_interval\",\n+ \"start_method\",\n+ \"role\",\n+ \"module\",\n+ \"m\",\n+ \"no_python\",\n+ \"run_path\",\n+ \"log_dir\",\n+ \"r\",\n+ \"redirects\",\n+ \"t\",\n+ \"tee\",\n+ \"node_rank\",\n+ \"master_addr\",\n+ \"master_port\",\n+]\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nindex c74185c5d..c6605ee26 100644\n--- a/src/accelerate/utils/launch.py\n+++ b/src/accelerate/utils/launch.py\n@@ -21,6 +21,12 @@\n from .dataclasses import DistributedType\n \n \n+if is_torch_version(\">=\", \"1.9.0\"):\n+ import torch.distributed.run as distrib_run\n+else:\n+ import torch.distributed.launch as distrib_run\n+\n+\n def get_launch_prefix():\n \"\"\"\n Grabs the correct 
launcher for starting a distributed command, such as either `torchrun`, `python -m\n@@ -35,6 +41,23 @@ def get_launch_prefix():\n return cmd\n \n \n+def _filter_args(args):\n+ \"\"\"\n+ Filters out all `accelerate` specific args\n+ \"\"\"\n+ distrib_args = distrib_run.get_args_parser()\n+ known_args, _ = distrib_args.parse_known_args()\n+ for arg in list(vars(args).keys()):\n+ if arg not in vars(known_args).keys():\n+ delattr(args, arg)\n+ distrib_args = distrib_run.parse_args(vars(args))\n+ for key, value in vars(args).items():\n+ setattr(distrib_args, key, value)\n+ if is_torch_version(\"<\", \"1.9.0\"):\n+ setattr(distrib_args, \"use_env\", True)\n+ return distrib_args\n+\n+\n class PrepareForLaunch:\n \"\"\"\n Prepare a function that will launched in a distributed setup.\n", "code_comments": [ { "body": "Clean!", "diff_hunk": "@@ -35,6 +36,21 @@ def get_launch_prefix():\n return cmd\n \n \n+def _filter_args(args):", "from_author": false }, { "body": "Please use a regular list here, it's going to be easier to maintain (in terms of diff)", "diff_hunk": "@@ -31,3 +31,8 @@\n DEEPSPEED_MULTINODE_LAUNCHERS = [\"pdsh\", \"standard\", \"openmpi\", \"mvapich\"]\n \n STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\n+\n+# These are the args for `torch.distributed.launch` for pytorch < 1.9\n+TORCH_LAUNCH_PARAMS = \"\"\"nnodes,nproc_per_node,rdzv_backend,rdzv_endpoint,rdzv_id,rdzv_conf,standalone,max_restarts,monitor_interval,start_method,role,module,m,no_python,run_path,log_dir,r,redirects,t,tee,node_rank,master_addr,master_port\"\"\".split(\n+ \",\"\n+)", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/623", "pr_id": 1022069195 }, { "diff": "diff --git a/examples/README.md b/examples/README.md\nindex 150afb75c..c4f050b3f 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -136,7 +136,7 @@ To run it in each of these various modes, use the following commands:\n ```\n - single GPU:\n ```bash\n- python ./nlp_example.py # from a server with a GPU\n+ python ./cv_example.py # from a server with a GPU\n ```\n - with fp16 (mixed-precision)\n * from any server by passing `fp16=True` to the `Accelerator`.\n", "code_comments": [], "context": [ { "body": "Thanks for the fix!", "from_author": false }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/622", "pr_id": 1021867447 }, { "diff": "diff --git a/docs/source/usage_guides/gradient_accumulation.mdx b/docs/source/usage_guides/gradient_accumulation.mdx\nindex f9f5dc62a..bd06efed9 100644\n--- a/docs/source/usage_guides/gradient_accumulation.mdx\n+++ b/docs/source/usage_guides/gradient_accumulation.mdx\n@@ -31,7 +31,6 @@ model.to(device)\n gradient_accumulation_steps = 2\n \n for index, batch in enumerate(training_dataloader):\n- optimizer.zero_grad()\n inputs, targets = batch\n inputs = inputs.to(device)\n targets = targets.to(device)\n@@ -42,6 +41,7 @@ for index, batch in enumerate(training_dataloader):\n if (index + 1) % gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step()\n+ optimizer.zero_grad()\n ```\n \n ## Converting it to πŸ€— Accelerate\n@@ -57,7 +57,6 @@ First the code shown earlier will be converted to utilize πŸ€— Accelerate withou\n + )\n \n for index, 
batch in enumerate(training_dataloader):\n- optimizer.zero_grad()\n inputs, targets = batch\n - inputs = inputs.to(device)\n - targets = targets.to(device)\n@@ -68,6 +67,7 @@ First the code shown earlier will be converted to utilize πŸ€— Accelerate withou\n if (index+1) % gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step()\n+ optimizer.zero_grad()\n ```\n \n <Tip warning={true}>\n@@ -94,7 +94,6 @@ You just wrap it around the entire training part of our code:\n - for index, batch in enumerate(training_dataloader):\n + for batch in training_dataloader:\n + with accelerator.accumulate(model):\n- optimizer.zero_grad()\n inputs, targets = batch\n outputs = model(inputs)\n ```\n@@ -107,6 +106,7 @@ You can remove all the special checks for the step number and the loss adjustmen\n - if (index+1) % gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step()\n+ optimizer.zero_grad()\n ```\n \n As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss. \n@@ -118,11 +118,11 @@ Below is the finished implementation for performing gradient accumulation with \n ```python\n for batch in training_dataloader:\n with accelerator.accumulate(model):\n- optimizer.zero_grad()\n inputs, targets = batch\n outputs = model(inputs)\n loss = loss_function(outputs, targets)\n accelerator.backward(loss)\n optimizer.step()\n scheduler.step()\n+ optimizer.zero_grad()\n ```\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/620", "pr_id": 1021713865 }, { "diff": "diff --git a/examples/README.md b/examples/README.md\nindex 271ffd3c5..150afb75c 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -184,6 +184,10 @@ To run it in each of these various modes, use the following commands:\n * In PyTorch:\n Add an `xmp.spawn` line in your script as you usually do.\n \n+### Simple vision example (GANs)\n+\n+- [huggan project](https://github.com/huggingface/community-events/tree/main/huggan)\n+ \n ## Finer Examples\n \n While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations.\n", "code_comments": [ { "body": "```suggestion\r\n- [huggan project](https://github.com/huggingface/community-events/tree/main/huggan)\r\n```", "diff_hunk": "@@ -184,6 +184,10 @@ To run it in each of these various modes, use the following commands:\n * In PyTorch:\n Add an `xmp.spawn` line in your script as you usually do.\n \n+### Simple vision example (GANs)\n+\n+- Please see: https://github.com/huggingface/community-events/tree/main/huggan", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/619", "pr_id": 1021681480 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 8247aa5fe..4bc977029 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -50,7 +50,7 @@\n ]\n },\n python_requires=\">=3.7.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"rich\"],\n extras_require=extras,\n classifiers=[\n 
\"Development Status :: 5 - Production/Stable\",\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex 145b0aa27..03a7198ac 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -17,5 +17,6 @@\n find_executable_batch_size,\n infer_auto_device_map,\n load_checkpoint_in_model,\n+ rich,\n synchronize_rng_states,\n )\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex fcb26228a..ea27afb75 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -46,11 +46,15 @@\n )\n from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\n from accelerate.utils.dataclasses import SageMakerDistributedType\n+from rich import get_console\n+from rich.logging import RichHandler\n \n \n if is_torch_version(\">=\", \"1.9.0\"):\n import torch.distributed.run as distrib_run\n \n+FORMAT = \"%(message)s\"\n+logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\n \n logger = logging.getLogger(__name__)\n \n@@ -301,7 +305,12 @@ def launch_command_parser(subparsers=None):\n \"--aws_secret_access_key\",\n type=str,\n default=None,\n- help=\"The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job\",\n+ help=\"The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.\",\n+ )\n+ parser.add_argument(\n+ \"--debug\",\n+ action=\"store_true\",\n+ help=\"Whether to print out the torch.distributed stack trace when something fails.\",\n )\n parser.add_argument(\n \"training_script\",\n@@ -443,9 +452,17 @@ def multi_gpu_launcher(args):\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n if is_torch_version(\">=\", \"1.9.0\"):\n+ debug = getattr(args, \"debug\", False)\n args = _filter_args(args)\n with patch_environment(**current_env):\n- distrib_run.run(args)\n+ console = get_console()\n+\n+ try:\n+ distrib_run.run(args)\n+ except:\n+ if debug:\n+ console.print(\"\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\n+ console.print_exception(suppress=[__file__], show_locals=False)\n else:\n # We still have to use subprocess, the user won't get a clean traceback as a result\n cmd = get_launch_prefix()\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nindex 1a452ca63..31d71ad21 100644\n--- a/src/accelerate/utils/launch.py\n+++ b/src/accelerate/utils/launch.py\n@@ -23,8 +23,6 @@\n \n if is_torch_version(\">=\", \"1.9.0\"):\n import torch.distributed.run as distrib_run\n-else:\n- import torch.distributed.launch as distrib_run\n \n \n def get_launch_prefix():\ndiff --git a/src/accelerate/utils/rich.py b/src/accelerate/utils/rich.py\nnew file mode 100644\nindex 000000000..7a54c8b5a\n--- /dev/null\n+++ b/src/accelerate/utils/rich.py\n@@ -0,0 +1,18 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from rich.traceback import install\n+\n+\n+install(show_locals=False)\n", "code_comments": [ { "body": "I think this is a leftover of your tests?", "diff_hunk": "@@ -97,6 +98,8 @@ def collate_fn(examples):\n def training_function(config, args):\n # Initialize accelerator\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ if accelerator.process_index == 1:\n+ raise ValueError()", "from_author": false }, { "body": "You put `rich` in the core install here but use an `is_rich_available` later on.", "diff_hunk": "@@ -50,7 +50,7 @@\n ]\n },\n python_requires=\">=3.7.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"rich\"],", "from_author": false }, { "body": "Should be its own list in a constant probably, so it's easier to read/add/see diff", "diff_hunk": "@@ -444,10 +441,29 @@ def multi_gpu_launcher(args):\n if args.fsdp_state_dict_type is not None:\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n- process = subprocess.Popen(cmd, env=current_env)\n- process.wait()\n- if process.returncode != 0:\n- raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+ # use torchrun directly here and try to call it\n+\n+ with patch_environment(**current_env):\n+ import torch.distributed.run as distrib_run\n+ debug = getattr(args, \"debug\", False)\n+ keys = \"\"\"nnodes,nproc_per_node,rdzv_backend,rdzv_endpoint,rdzv_id,rdzv_conf,standalone,max_restarts,monitor_interval,start_method,role,module,m,no_python,run_path,log_dir,r,redirects,t,tee,node_rank,master_addr,master_port,training_script,training_script_args\"\"\".split(\",\")", "from_author": false }, { "body": "Thanks, fixed this now! πŸ€— ", "diff_hunk": "@@ -97,6 +98,8 @@ def collate_fn(examples):\n def training_function(config, args):\n # Initialize accelerator\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ if accelerator.process_index == 1:\n+ raise ValueError()", "from_author": true }, { "body": "As discussed offline, opted to make rich a true dep in Accelerate", "diff_hunk": "@@ -50,7 +50,7 @@\n ]\n },\n python_requires=\">=3.7.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"rich\"],", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Nice utility! LGTM πŸ€—, I think Sylvain's suggestion would be a really good way to use this feature. Also, I think this won't result in rich traceback if the issue was in creating an `Accelerate` object itself, right? 
if so, should that be mentioned as part of the documentation?", "from_author": false }, { "body": "<s>In Multi GPU launcher, if one isn't using `--debug` then they won't see any error stack trace at all (which might be hard to read but important for root cause and resolution). This makes `--debug` sort of mandatory instead of optional. Can the user have normal error traceback if `--debug` isn't given? Please let me know if I am missing something or way off. </s>\r\n\r\nWent over it again with a sample code, \r\n```python\r\nfrom rich.traceback import \r\ninstall install(show_locals=True)\r\n``` \r\nmakes rich the default traceback handler. `--debug` meaning is no more what Sylvain was referring to, hence my confusion. ", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/613", "pr_id": 1020716188 }, { "diff": "diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nindex c0c9c62f2..cb0a7ee42 100644\n--- a/.github/workflows/nightly.yml\n+++ b/.github/workflows/nightly.yml\n@@ -57,10 +57,16 @@ jobs:\n git fetch && git checkout ${{ github.sha }}\n pip install -e . --no-deps\n \n- - name: Run test on GPUs\n+ - name: Run core and big modeling tests on GPUs\n run: |\n source activate accelerate\n- make test\n+ make test_big_modeling\n+ make test_core\n+\n+ - name: Run Integration tests on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_integrations\n \n - name: Run examples on GPUs\n run: |\ndiff --git a/Makefile b/Makefile\nindex 100a1484a..c1c79d6e8 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -44,6 +44,8 @@ test_examples:\n \tpython -m pytest -s -v ./tests/test_examples.py\n \n # Broken down example tests for the CI runners\n+test_integrations:\n+\tpython -m pytest -s -v ./tests/deepspeed ./tests/fsdp\n test_example_differences:\n \tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests\n \ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex 30eb2a4d8..1be794421 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -35,7 +35,6 @@\n require_cuda,\n require_deepspeed,\n require_multi_gpu,\n- skip,\n slow,\n )\n from accelerate.test_utils.training import RegressionDataset\n@@ -697,7 +696,6 @@ def test_performance(self):\n with patch_environment(omp_num_threads=1):\n execute_subprocess_async(cmd_stage, env=os.environ.copy())\n \n- @skip\n def test_checkpointing(self):\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_checkpointing.py\")\n cmd = [\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thank you πŸ˜„. This is neat and finally all the tests would run as expected πŸ€—. 
", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/612", "pr_id": 1020571705 }, { "diff": "diff --git a/.github/workflows/check_dependencies.yml b/.github/workflows/build_and_run_tests.yml\nsimilarity index 82%\nrename from .github/workflows/check_dependencies.yml\nrename to .github/workflows/build_and_run_tests.yml\nindex f8ac1f492..a5378353d 100644\n--- a/.github/workflows/check_dependencies.yml\n+++ b/.github/workflows/build_and_run_tests.yml\n@@ -10,7 +10,7 @@ env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n \n jobs:\n- check-for-setup:\n+ check-for-source:\n runs-on: ubuntu-latest\n name: Check if setup was changed\n outputs:\n@@ -28,7 +28,7 @@ jobs:\n id: was_changed\n run: |\n for file in ${{ steps.changed-files.outputs.all_changed_files }}; do\n- if [ `basename \"${file}\"` = \"setup.py\" ]; then\n+ if [ `basename \"${file}\"` == \"setup.py\" ]; then\n echo ::set-output name=changed::\"1\"\n fi\n done\n@@ -36,10 +36,10 @@ jobs:\n build-docker-containers:\n needs: check-for-setup\n if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')\n- uses: ./.github/workflows/build-docker-images.yml\n+ uses: ./.github/workflows/build_docker_images.yml\n secrets: inherit\n \n- run-tests:\n+ run-merge-tests:\n needs: build-docker-containers\n if: always()\n- uses: ./.github/workflows/on-merge.yml\n\\ No newline at end of file\n+ uses: ./.github/workflows/run_merge_tests.yml\n\\ No newline at end of file\ndiff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build_docker_images.yml\nsimilarity index 100%\nrename from .github/workflows/build-docker-images.yml\nrename to .github/workflows/build_docker_images.yml\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/run_merge_tests.yml\nsimilarity index 100%\nrename from .github/workflows/on-merge.yml\nrename to .github/workflows/run_merge_tests.yml\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex d39d36951..5d8ff0c1b 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -1,6 +1,13 @@\n name: Run Tests\n \n-on: [pull_request]\n+on:\n+ pull_request:\n+ paths:\n+ - \"src/**\"\n+ - \"tests/**\"\n+ - \".github/**\"\n+ - \"setup.py\"\n+ types: [opened, synchronize, reopened]\n \n env:\n HF_HOME: ~/hf_cache\n", "code_comments": [ { "body": "I feel this name should be `Checks if setup.py were changed`, as I can only see it checks that file.\r\n\r\nAnd on merge event to `main`, it seems the tests are always run.", "diff_hunk": "@@ -10,9 +10,9 @@ env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n \n jobs:\n- check-for-setup:\n+ check-for-source:\n runs-on: ubuntu-latest\n- name: Check if setup was changed\n+ name: Checks if source code or tests were changed", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "So the purpose of `check-for-source` in `build_and_run_tests.yml` and `test.yml` are different?\r\n\r\n- on PR request: check source file, and run the tests only if any changed found.\r\n- on merge (to main): check `setup.py` to build the image if necessary. Then always run the tests?", "from_author": false }, { "body": "I am probably wrong above, as I see you have `needs: build-docker-containers`. 
\r\nBut I am confused a bit: currently (without this PR), what happens (on merge) if `setup.py` is not changed, but some `.py` files change?\r\n\r\nI will review in more detail tomorrow.\r\n", "from_author": false }, { "body": "@ydshieh if some `.py` was changed but not the setup then the tests are ran but Docker images won't be rebuilt", "from_author": true }, { "body": "Thanks, @muellerzr . So in this PR, what happens (on merge) if no `.py` file changes at all (`setup.py` not change + no other `.py` change)? If I understand, the tests still run. Is this the expected behavior?", "from_author": false }, { "body": "My only remaining question is as above: on merge, should we skip the tests if no `.py` file changes at all. Otherwise, all LGTM.", "from_author": false }, { "body": "@ydshieh (sorry this got buried in my priorities):\r\n\r\nRe: on merge, yes it is because we never know if some other dependency has a regression issue or what. So we're still made aware of when this happens post merge and can immediately know it's unrelated to something directly in that PR without it silently not failing until the nightlies", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/611", "pr_id": 1020543878 }, { "diff": "diff --git a/docs/source/index.mdx b/docs/source/index.mdx\nindex 3f0f58802..1664fd7a2 100644\n--- a/docs/source/index.mdx\n+++ b/docs/source/index.mdx\n@@ -55,7 +55,7 @@ accelerate launch {my_script.py}\n ><div class=\"w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">Tutorials</div>\n <p class=\"text-gray-700\">Learn the basics and become familiar with using πŸ€— Accelerate. Start here if you are using πŸ€— Accelerate for the first time!</p>\n </a>\n- <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/utility_guides/gradient_accumulation\"\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/usage_guides/gradient_accumulation\"\n ><div class=\"w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">How-to guides</div>\n <p class=\"text-gray-700\">Practical guides to help you achieve a specific goal. 
Take a look at these guides to learn how to use πŸ€— Accelerate to solve real-world problems.</p>\n </a>\n@@ -68,4 +68,4 @@ accelerate launch {my_script.py}\n <p class=\"text-gray-700\">Technical descriptions of how πŸ€— Accelerate classes and methods work.</p>\n </a>\n </div>\n-</div>\n\\ No newline at end of file\n+</div>\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/610", "pr_id": 1020495465 }, { "diff": "diff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex 42868a0a5..3ba46a03f 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -103,7 +103,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n return data\n \n \n-def send_to_device(tensor, device):\n+def send_to_device(tensor, device, non_blocking=False):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.\n \n@@ -117,13 +117,16 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ try:\n+ return t.to(device, non_blocking=non_blocking)\n+ except TypeError: # .to() doesn't accept non_blocking as kwarg\n+ return t.to(device)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")\n \n- return recursively_apply(_send_to_device, tensor, device, test_type=_has_to_method)\n+ return recursively_apply(_send_to_device, tensor, device, non_blocking, test_type=_has_to_method)\n \n \n def get_data_structure(data):\n", "code_comments": [ { "body": "So far all objects in Pytorch which accept `.to()` method accept the `non_blocking` kwarg or at least kwargs in general, but are we sure this is going to be always the case?\r\n\r\n- [torch.jit.ScriptModule.to](https://pytorch.org/docs/stable/generated/torch.jit.ScriptModule.html?highlight=#torch.jit.ScriptModule.to) (Python method, in ScriptModule)\r\n- [torch.nn.Module.to](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=#torch.nn.Module.to) (Python method, in Module)\r\n- [torch.nn.utils.rnn.PackedSequence.to](https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.PackedSequence.html?highlight=#torch.nn.utils.rnn.PackedSequence.to) (Python method, in PackedSequence)\r\n- [torch.Tensor.to](https://pytorch.org/docs/stable/generated/torch.Tensor.to.html?highlight=#torch.Tensor.to) (Python method, in torch.Tensor.to)\r\n\r\ncc @sgugger ", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": true }, { "body": "This is for our own objects FYI, `BatchEncoding` in Transformers, which does not accept the `non_blocking` argument :-/\r\nMaybe we can inspect the signature and only pass it when we see it?", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return 
t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": false }, { "body": "I tried using `t.to.__code__` and `inspect.signature(t.to)` and both give errors. Do you have a better way to get kwargs of a `builtin_function_or_method` @sgugger ?", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": true }, { "body": "Mmm, `inspect.signature(t.to)` should give you access to the keyword arguments. You can find them in the `.parameters` attribute, which will be a dict-like object.", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": false }, { "body": "I get this error when trying `inspect.signature(t.to)` where `t` is a tensor\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/nouamane/miniconda3/envs/hf/lib/python3.9/inspect.py\", line 3111, in signature\r\n return Signature.from_callable(obj, follow_wrapped=follow_wrapped)\r\n File \"/home/nouamane/miniconda3/envs/hf/lib/python3.9/inspect.py\", line 2860, in from_callable\r\n return _signature_from_callable(obj, sigcls=cls,\r\n File \"/home/nouamane/miniconda3/envs/hf/lib/python3.9/inspect.py\", line 2327, in _signature_from_callable\r\n return _signature_from_builtin(sigcls, obj,\r\n File \"/home/nouamane/miniconda3/envs/hf/lib/python3.9/inspect.py\", line 2145, in _signature_from_builtin\r\n raise ValueError(\"no signature found for builtin {!r}\".format(func))\r\nValueError: no signature found for builtin <built-in method to of Tensor object at 0x7f7e98c57720>\r\n```", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": true }, { "body": "I think this is related to the warning in [inspect's docs](https://docs.python.org/3/library/inspect.html#inspect.signature)\r\n\r\n> Note: Some callables may not be introspectable in certain implementations of Python. For example, in CPython, some built-in functions defined in C provide no metadata about their arguments.", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": true }, { "body": "Maybe we can isolate tensors in the function then? 
Something like:\r\n```\r\nif isinstance(t, torch.Tensor):\r\n return t.to(device, non_blocking=non_blocking)\r\nelse:\r\n # inspect signature here in a try/except block\r\n```", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": false }, { "body": "Wouldn't this be enough, or should I inspect signature?\r\n ```python\r\n def _send_to_device(t, device, non_blocking):\r\n if isinstance(t, torch.Tensor):\r\n return t.to(device, non_blocking=non_blocking)\r\n else:\r\n try:\r\n return t.to(device, non_blocking=non_blocking)\r\n except TypeError:\r\n return t.to(device)\r\n```", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": true }, { "body": "Oh in this case, you don't even need the first test. Are you sure about the type of `TypeError`?", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": false }, { "body": "Searching on google for `python unexpected argument`, it's usually `TypeError: ... got an unexpected keyword`. 
I'm not sure if it's always the case though", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": true }, { "body": "Seems to work for C defined methods as well:\r\n```python\r\n>> t.__abs__(a=5)\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nTypeError: Tensor.abs() takes no keyword arguments\r\n```\r\n\r\n\r\n```python\r\n>> t.to(a=5)\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nTypeError: to() received an invalid combination of arguments - got (a=int, ), but expected one of:\r\n * (torch.device device, torch.dtype dtype, bool non_blocking, bool copy, *, torch.memory_format memory_format)\r\n * (torch.dtype dtype, bool non_blocking, bool copy, *, torch.memory_format memory_format)\r\n * (Tensor tensor, bool non_blocking, bool copy, *, torch.memory_format memory_format)\r\n```", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": true }, { "body": "Nice!", "diff_hunk": "@@ -117,13 +117,13 @@ def send_to_device(tensor, device):\n The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n- def _send_to_device(t, device):\n- return t.to(device)\n+ def _send_to_device(t, device, non_blocking):\n+ return t.to(device, non_blocking=non_blocking)\n \n def _has_to_method(t):\n return hasattr(t, \"to\")", "from_author": false }, { "body": "@sgugger the docs say that `tensor` is a torch.Tensor here, which isn't necessary the case iiuc. 
We can probably update the docs as well", "diff_hunk": "@@ -103,7 +103,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n return data\n \n \n-def send_to_device(tensor, device):\n+def send_to_device(tensor, device, non_blocking=False):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.", "from_author": true }, { "body": "True", "diff_hunk": "@@ -103,7 +103,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n return data\n \n \n-def send_to_device(tensor, device):\n+def send_to_device(tensor, device, non_blocking=False):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.", "from_author": false }, { "body": "We could leave that for another PR ", "diff_hunk": "@@ -103,7 +103,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n return data\n \n \n-def send_to_device(tensor, device):\n+def send_to_device(tensor, device, non_blocking=False):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "This issue has been automatically marked as stale because it has not had recent activity. If you think this still needs to be addressed please comment on this thread.\n\nPlease note that issues that do not follow the [contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) are likely to be ignored.", "from_author": false }, { "body": "Hi @sgugger, was there a reason that this was closed and not merged? I came across this when I was about to request the same thing. ", "from_author": false }, { "body": "@Chris-hughes10 it was automatically closed by the stale bot due to the absence of activity. 
@NouamaneTazi is still investigating whether this could have some negative impact sometimes.", "from_author": false }, { "body": "### Should we make .to(non_blocking=True) the default in **accelerate**?\r\nLet’s use this script to find out\r\n\r\n```python\r\nimport torch\r\nif __name__ == '__main__':\r\n seed = 0\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed)\r\n stream = torch.cuda.current_stream()\r\n\r\n x = torch.rand(32, 256, 220, 220).cuda()\r\n\r\n t = (x.min() - x.max()).to(torch.device(\"cpu\"), non_blocking=False) # try with False then True\r\n # t = (x.min() - x.max()).to(\"cuda:1\", non_blocking=True) # try GPU0 to GPU1 copy\r\n\r\n print(stream.query()) # False - Checks if all the work submitted has been completed.\r\n print(t)\r\n stream.synchronize() # wait for stream to finish the work\r\n \r\n print(stream.query()) # True - work done\r\n print(t)\r\n```\r\n\r\n<details>\r\n<summary>Copy to CPU with non_blocking=False (default)</summary>\r\n\r\nIn this case `.to()` adds a `cudaStreamSynchronize` op which makes the CPU use the correct value of the tensor when printing\r\n![image](https://user-images.githubusercontent.com/29777165/194038267-91033b35-fc18-4a0d-9af1-cfb1e382721e.png)\r\n\r\n</details>\r\n\r\n<details>\r\n<summary>Copy to CPU with non_blocking=True </summary>\r\n\r\n\r\nIn this case the CPU submits the kernels for `.to()` to the GPU then moves on to perform the print operation which uses an incorrect value for the tensor `tensor(0.)` **(The dangerous part)** \r\n![image](https://user-images.githubusercontent.com/29777165/194038545-88833ffc-5141-4fd4-a503-c140202f0bed.png)\r\n\r\n</details>\r\n\r\n<details>\r\n<summary>Copy to another GPU with non_blocking=True </summary>\r\n\r\nIt seems that the `non_blocking` here doesn’t do much (we get basically the same thing using `non_blocking=True` ). In both cases we have GPU 1 waiting for GPU 0 to finish working on the tensor, and THEN copy it to GPU 1. And finally the CPU prints the tensor that’s now located on GPU 1\r\nIn this case `.to()` creates a `cudaStreamWaitEvent` event (figure 2) which makes GPU 1 waits for GPU 0. I made [an issue](https://discuss.pytorch.org/t/to-cuda-1-non-blocking-true-creates-cudastreamwaitevent/162296) on Pytorch’s forums to investigate why is this the case\r\n\r\n![image](https://user-images.githubusercontent.com/29777165/194038566-56a7c178-d659-4c3f-a759-d94da8c0f152.png)\r\n\r\n![image](https://user-images.githubusercontent.com/29777165/194038587-5a089b5c-af42-4bab-86b1-516e2ebe3300.png)\r\n\r\n</details>\r\n\r\n**tldr;** `non_blocking` could be a game changer in using your GPUs efficiently. \r\n- **Good use scenario:** Copying your data from CPU to GPU in a non_blocking way then running your model which exists on GPU (this would make the CPU launch the copy kernel, then moves on to queuing other kernels in your model on the GPU. As opposed to waiting for the copy to end, and only then launching kernels from the model). [Example from Pytorch's repo](https://github.com/pytorch/examples/blob/main/imagenet/main.py#L321-L326).\r\n- **Bad use scenario:** Copying your data from CPU to GPU in a non_blocking way then start some operations **on CPU** that would use the non-ready tensors. 
(could be in `if` statements, or simple arithmetics...)\r\n\r\n\r\n=> It’s good to support that argument in accelerate but it’s better to keep the default as it is, just like it’s the case in Pytorch ", "from_author": true }, { "body": "@NouamaneTazi These are really interesting insights! If you are looking at inspecting signatures, do you think it would be too much complexity to set non-blocking automatically based on whether it is CPU -> GPU transfer or GPU -> CPU transfer by inspecting the tensor's current device?\r\n\r\nIf you like the idea, perhaps this would be a different feature though, toggled by an accelerator flag?", "from_author": false }, { "body": "@Chris-hughes10 CPU -> GPU and GPU -> CPU both lead to the same issues as mentioned above. Only GPU -> GPU is the safe operation but as I said above, it seems that it requires the two GPUs synchronization whether we set `non_blocking=True` or not", "from_author": true }, { "body": "> @Chris-hughes10 CPU -> GPU and GPU -> CPU both lead to the same issues as mentioned above. Only GPU -> GPU is the safe operation but as I said above, it seems that it requires the two GPUs synchronization whether we set `non_blocking=True` or not\r\n\r\nApologies, I misread the post above. Please ignore my previous suggestion!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/607", "pr_id": 1018728968 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 188800527..467a757b4 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -677,7 +677,7 @@ def load_checkpoint_in_model(\n else:\n for param_name, param in checkpoint.items():\n module_name = param_name\n- if dtype is not None:\n+ if dtype is not None and not str(param.dtype).startswith((\"torch.uint\", \"torch.int\", \"torch.bool\")):\n param = param.to(dtype)\n while len(module_name) > 0 and module_name not in device_map:\n module_name = \".\".join(module_name.split(\".\")[:-1])\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/606", "pr_id": 1017453798 }, { "diff": "diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 673975fc7..839a7a2a9 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -209,7 +209,7 @@ def __init__(self, run_name: str, **kwargs):\n \n @property\n def tracker(self):\n- return self.run.run\n+ return self.run\n \n def store_init_configuration(self, values: dict):\n \"\"\"\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/605", "pr_id": 1017337994 }, { "diff": "diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex a8c1aedac..30eb2a4d8 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -35,6 +35,7 @@\n require_cuda,\n require_deepspeed,\n require_multi_gpu,\n+ skip,\n slow,\n )\n from accelerate.test_utils.training import RegressionDataset\n@@ -642,7 +643,9 @@ def setUp(self):\n \"deepspeed_stage_1_fp16\": 1600,\n \"deepspeed_stage_2_fp16\": 2500,\n \"deepspeed_stage_3_zero_init_fp16\": 2800,\n- \"deepspeed_stage_3_cpu_offload_fp16\": 1900,\n+ # 
Disabling below test as it overwhelms the RAM memory usage\n+ # on CI self-hosted runner leading to tests getting killed.\n+ # \"deepspeed_stage_3_cpu_offload_fp16\": 1900,\n }\n self.n_train = 160\n self.n_val = 160\n@@ -694,6 +697,7 @@ def test_performance(self):\n with patch_environment(omp_num_threads=1):\n execute_subprocess_async(cmd_stage, env=os.environ.copy())\n \n+ @skip\n def test_checkpointing(self):\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_checkpointing.py\")\n cmd = [\ndiff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py\nindex 628e377c3..249d2b692 100644\n--- a/tests/fsdp/test_fsdp.py\n+++ b/tests/fsdp/test_fsdp.py\n@@ -191,7 +191,9 @@ def setUp(self):\n \"multi_gpu_fp16\": 3200,\n \"fsdp_shard_grad_op_transformer_based_wrap_fp16\": 2000,\n \"fsdp_full_shard_transformer_based_wrap_fp16\": 1900,\n- \"fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32\": 1500, # fp16 was leading to indefinite hang\n+ # Disabling below test as it overwhelms the RAM memory usage\n+ # on CI self-hosted runner leading to tests getting killed.\n+ # \"fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32\": 1500, # fp16 was leading to indefinite hang\n }\n self.n_train = 160\n self.n_val = 160\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/604", "pr_id": 1017176784 }, { "diff": "diff --git a/docs/source/concept_guides/training_tpu.mdx b/docs/source/concept_guides/training_tpu.mdx\nindex 32736ce31..7fe54b14a 100644\n--- a/docs/source/concept_guides/training_tpu.mdx\n+++ b/docs/source/concept_guides/training_tpu.mdx\n@@ -89,7 +89,7 @@ like:\n ProcessExitedException: process 0 terminated with signal SIGSEGV\n ```\n \n-This error is *extremely* cryptic but the basic explaination is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to \n+This error is *extremely* cryptic but the basic explanation is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to \n accept a single `model` argument, and declare it in an outside cell:\n \n ```python\n@@ -137,7 +137,7 @@ accelerator = Accelerator(mixed_precision=\"bf16\")\n By default this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs. \n The specific configuration being set is an environmental variable of `XLA_USE_BF16` is set to `1`.\n \n-There is a futher configuration you can perform which is setting the `XLA_DOWNCAST_BF16` environmental variable. If set to `1`, then \n+There is a further configuration you can perform which is setting the `XLA_DOWNCAST_BF16` environmental variable. If set to `1`, then \n `torch.float` is `bfloat16` and `torch.double` is `float32`.\n \n This is performed in the `Accelerator` object when passing `downcast_bf16=True`:\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex 812b20226..6d92e7958 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -391,7 +391,7 @@ and [`~Accelerator.clip_grad_value_`] respectively.\n \n If you are running your training in Mixed Precision with πŸ€— Accelerate, you will get the best result with your loss being\n computed inside your model (like in Transformer models for instance). 
Every computation outside of the model will be\n-executed in full precision (which is generally what you want for loss computation, expecially if it involves a\n+executed in full precision (which is generally what you want for loss computation, especially if it involves a\n softmax). However you might want to put your loss computation inside the *accelerator.autocast* context manager:\n \n ```\ndiff --git a/docs/source/usage_guides/big_modeling.mdx b/docs/source/usage_guides/big_modeling.mdx\nindex 243b354d0..9b3252809 100644\n--- a/docs/source/usage_guides/big_modeling.mdx\n+++ b/docs/source/usage_guides/big_modeling.mdx\n@@ -213,7 +213,7 @@ You can let πŸ€— Accelerate handle the device map computation by setting `device\n \n </Tip>\n \n-All the options will produce the same result when you don't have enough GPU memory to accomodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM). \n+All the options will produce the same result when you don't have enough GPU memory to accommodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM). \n \n When you have more GPU memory available than the model size, here the difference between each option:\n - `\"auto\"` and `\"balanced\"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1.\ndiff --git a/docs/source/usage_guides/checkpoint.mdx b/docs/source/usage_guides/checkpoint.mdx\nindex c818c9847..7d6bbbf99 100644\n--- a/docs/source/usage_guides/checkpoint.mdx\n+++ b/docs/source/usage_guides/checkpoint.mdx\n@@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.\n # Checkpointing\n \n When training a PyTorch model with πŸ€— Accelerate, you may often want to save and continue a state of training. Doing so requires\n-saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside πŸ€— Accelerate are two convience functions to achieve this quickly:\n+saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside πŸ€— Accelerate are two convenience functions to achieve this quickly:\n - Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location\n - Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`\n \ndiff --git a/docs/source/usage_guides/deepspeed.mdx b/docs/source/usage_guides/deepspeed.mdx\nindex 8a4238732..29561c77b 100644\n--- a/docs/source/usage_guides/deepspeed.mdx\n+++ b/docs/source/usage_guides/deepspeed.mdx\n@@ -68,7 +68,7 @@ Inference:\n \n ## How it works?\n \n-**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation)\n for more information.\n \n We will first look at easy to use integration via `accelerate config`. \n@@ -383,13 +383,13 @@ We will look at the changes needed in the code when using these.\n ```\n b. 
Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\n In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin.\n- In the above example we can see that the code reamins unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\n+ In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\n \n c. Custom Optim + DS Scheduler: The case when only `scheduler` key is present in the DeepSpeed config file. \n In this situation, user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code. \n \n d. DS Optim + Custom Scheduler: The case when only `optimizer` key is present in the DeepSpeed config file. \n- This will result in an error because one can only use DS Scheduler when using DS Optim.\n+ This will result in an error because you can only use DS Scheduler when using DS Optim.\n \n 2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by `prepare` method \n based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. \n@@ -435,7 +435,7 @@ ZeRO Stage-3 has 2 options:\n logging.warning(f\"Failure {status_msg}\")\n ``` \n This will create ZeRO model and optimizer partitions along with `zero_to_fp32.py` script in checkpoint directory.\n- One can use this script to do offline consolidation. \n+ You can use this script to do offline consolidation. \n It requires no configuration files or GPUs. Here is an example of its usage: \n ```bash\n $ cd /path/to/checkpoint_dir\n@@ -444,14 +444,14 @@ ZeRO Stage-3 has 2 options:\n Detected checkpoint of type zero stage 3, world_size: 2\n Saving fp32 state dict to pytorch_model.bin (total_numel=60506624)\n ```\n- To get 32bit model for saving/inference, one can do the following:\n+ To get 32bit model for saving/inference, you can perform:\n ```python\n from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint\n \n unwrapped_model = accelerator.unwrap_model(model)\n fp32_model = load_state_dict_from_zero_checkpoint(unwrapped_model, checkpoint_dir)\n ```\n- If only interested in state_dict, one can do the following:\n+ If you are only interested in the `state_dict`, you can do the following:\n ```python\n from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint\n \n@@ -462,7 +462,7 @@ ZeRO Stage-3 has 2 options:\n ## ZeRO Inference\n DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. 
\n It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant.\n-With accelerate integration, one has to just prepare model and dataloader as shown below:\n+With accelerate integration, you just need to prepare the model and dataloader as shown below:\n \n ```python\n model, eval_dataloader = accelerator.prepare(model, eval_dataloader)\ndiff --git a/docs/source/usage_guides/tracking.mdx b/docs/source/usage_guides/tracking.mdx\nindex 7c61b21e9..cc5c17418 100644\n--- a/docs/source/usage_guides/tracking.mdx\n+++ b/docs/source/usage_guides/tracking.mdx\n@@ -104,7 +104,7 @@ Every tracker must implement three functions and have three properties:\n - This should be implemented as a `@property` function \n - Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`.\n \n-A brief example can be seen below with an integration with Weights and Biases, containing only the relevent information:\n+A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information:\n ```python\n from accelerate.tracking import GeneralTracker\n from typing import Optional\n", "code_comments": [ { "body": "```suggestion\r\nTo introduce special behavior in your script for TPUs you can check the `distributed_type` of your\r\n```", "diff_hunk": "@@ -261,7 +261,7 @@ lot of time. In practice, that means you must take special care to have all your\n shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layers with for loops that\n have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.\n \n-To introduce special behavior in your script for TPUs you can check the `distributed_type` of your\n+To introduce special behaviour in your script for TPUs you can check the `distributed_type` of your", "from_author": false }, { "body": "```suggestion\r\nInternally, the library works by first analyzing the environment in which the script is launched to determine which\r\n```", "diff_hunk": "@@ -447,7 +447,7 @@ will be added in a next version.\n \n ## Internal mechanism\n \n-Internally, the library works by first analyzing the environment in which the script is launched to determine which\n+Internally, the library works by first analysing the environment in which the script is launched to determine which", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/603", "pr_id": 1016367735 }, { "diff": "diff --git a/src/accelerate/test_utils/scripts/test_checkpointing.py b/src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py\nsimilarity index 100%\nrename from src/accelerate/test_utils/scripts/test_checkpointing.py\nrename to src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py\ndiff --git a/src/accelerate/test_utils/scripts/test_peak_memory_usage.py b/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py\nsimilarity index 100%\nrename from src/accelerate/test_utils/scripts/test_peak_memory_usage.py\nrename to src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py\ndiff --git a/src/accelerate/test_utils/scripts/test_performance.py b/src/accelerate/test_utils/scripts/external_deps/test_performance.py\nsimilarity index 100%\nrename from 
src/accelerate/test_utils/scripts/test_performance.py\nrename to src/accelerate/test_utils/scripts/external_deps/test_performance.py\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 350d95320..c394b50cc 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -383,7 +383,7 @@ def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must\n def set_mixed_precision(self, mixed_precision):\n ds_config = self.deepspeed_config\n if mixed_precision == \"fp16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\n- ds_config.update({\"fp16\": {\"enabled\": True}})\n+ ds_config.update({\"fp16\": {\"enabled\": True, \"auto_cast\": True}})\n elif mixed_precision == \"bf16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\n ds_config.update({\"bf16\": {\"enabled\": True}})\n \ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex 6b37eb93e..a8c1aedac 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -594,7 +594,9 @@ def test_autofill_dsconfig(self):\n \n def test_basic_run(self):\n mod_file = inspect.getfile(accelerate.test_utils)\n- test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_performance.py\"])\n+ test_file_path = os.path.sep.join(\n+ mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"external_deps\", \"test_performance.py\"]\n+ )\n with tempfile.TemporaryDirectory() as dirpath:\n cmd = [\n \"accelerate\",\n@@ -634,7 +636,7 @@ def setUp(self):\n \n self.stages = [1, 2, 3]\n self.zero3_offload_config = False\n- self.performance_lower_bound = 0.83\n+ self.performance_lower_bound = 0.82\n self.peak_memory_usage_upper_bound = {\n \"multi_gpu_fp16\": 3200,\n \"deepspeed_stage_1_fp16\": 1600,\n@@ -646,7 +648,7 @@ def setUp(self):\n self.n_val = 160\n \n mod_file = inspect.getfile(accelerate.test_utils)\n- self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\"])\n+ self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"external_deps\"])\n \n def test_performance(self):\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_performance.py\")\ndiff --git a/tests/fsdp/test_fsdp.py b/tests/fsdp/test_fsdp.py\nindex 8ad088c04..628e377c3 100644\n--- a/tests/fsdp/test_fsdp.py\n+++ b/tests/fsdp/test_fsdp.py\n@@ -182,7 +182,7 @@ def test_cpu_offload(self):\n class FSDPIntegrationTest(TempDirTestCase):\n def setUp(self):\n super().setUp()\n- self.performance_lower_bound = 0.83\n+ self.performance_lower_bound = 0.82\n self.performance_configs = [\n \"fsdp_shard_grad_op_transformer_based_wrap\",\n \"fsdp_full_shard_transformer_based_wrap\",\n@@ -197,7 +197,7 @@ def setUp(self):\n self.n_val = 160\n \n mod_file = inspect.getfile(accelerate.test_utils)\n- self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\"])\n+ self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"external_deps\"])\n \n def test_performance(self):\n self.test_file_path = os.path.join(self.test_scripts_folder, \"test_performance.py\")\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/602", "pr_id": 1015934205 }, { "diff": "diff --git a/src/accelerate/commands/launch.py 
b/src/accelerate/commands/launch.py\nindex 599e710f0..3930745dc 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -339,17 +339,6 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n- if args.num_cpu_threads_per_process is None:\n- local_size = get_int_from_env(\n- [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n- )\n- args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n- if args.num_cpu_threads_per_process == 0:\n- args.num_cpu_threads_per_process = 1\n- logger.info(\n- f\"num_cpu_threads_per_process unset, we set it at {args.num_cpu_threads_per_process} to improve oob performance.\"\n- )\n-\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n \n process = subprocess.Popen(cmd, env=current_env)\n@@ -447,9 +436,6 @@ def multi_gpu_launcher(args):\n current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\n if args.fsdp_state_dict_type is not None:\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n- if args.num_cpu_threads_per_process is None:\n- args.num_cpu_threads_per_process = 1\n- logger.info(f\"num_cpu_threads_per_process unset, we set it at {args.num_cpu_threads_per_process}.\")\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n@@ -803,6 +789,17 @@ def launch_command(args):\n if \"--num_processes\" in warn:\n warned[i] = warn.replace(\"`1`\", f\"`{args.num_processes}`\")\n \n+ if args.num_cpu_threads_per_process is None:\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n+ if args.num_cpu_threads_per_process == 0:\n+ args.num_cpu_threads_per_process = 1\n+ warned.append(\n+ f\"\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance\"\n+ )\n+\n if any(warned):\n message = \"The following values were not passed to `accelerate launch` and had defaults used instead:\\n\"\n message += \"\\n\".join(warned)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/598", "pr_id": 1015387439 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 0da843ec2..84c4e03f6 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -33,6 +33,8 @@\n title: How to avoid CUDA Out-of-Memory\n - local: usage_guides/sagemaker\n title: Using Accelerate on SageMaker\n+ - local: usage_guides/mps\n+ title: How to use Apple Silicon M1 GPUs\n title: How-To Guides\n - sections:\n - local: concept_guides/gradient_synchronization\ndiff --git a/docs/source/usage_guides/mps.mdx b/docs/source/usage_guides/mps.mdx\nnew file mode 100644\nindex 000000000..43a00f104\n--- /dev/null\n+++ b/docs/source/usage_guides/mps.mdx\n@@ -0,0 +1,81 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+**Pre-requisites**: To install torch with mps support, \n+please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1).\n+\n+\n+## How it works out of the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked, specifically choose `MPS` for the query:\n+\n+```\n+ Which type of machine are you using?. \n+ ```\n+\n+This will generate a config file that will be used automatically to properly set \n+the default options when doing `accelerate launch`, such as the one shown below:\n+\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MPS\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n+```\n+\n+After this configuration has been made, here is how you run the CV example \n+(from the root of the repo) with MPS enabled:\n+\n+```bash\n+accelerate launch /examples/cv_example.py --data_dir images\n+```\n+\n+## A few caveats to be aware of\n+\n+1. For `nlp_example.py` the metrics are too bad when compared to CPU-only training. \n+This means certain operations in BERT model are going wrong using mps device and this needs to be fixed by PyTorch.\n+2. Distributed setups `gloo` and `nccl` are not working with `mps` device. 
\n+This means that currently only single GPU of `mps` device type can be used.\n+\n+Finally, please, remember that, πŸ€— `Accelerate` only integrates MPS backend, therefore if you\n+have any problems or questions with regards to MPS backend usage, please, file an issue with [PyTorch GitHub](https://github.com/pytorch/pytorch/issues).\n\\ No newline at end of file\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex bbd8db435..9cd4b7da3 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -29,7 +29,7 @@\n from .logging import get_logger\n from .optimizer import AcceleratedOptimizer\n from .scheduler import AcceleratedScheduler\n-from .state import AcceleratorState, GradientState\n+from .state import AcceleratorState, GradientState, parse_flag_from_env\n from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers\n from .utils import (\n MODEL_NAME,\n@@ -282,7 +282,7 @@ def __init__(\n self.native_amp = is_torch_version(\">=\", \"1.6\")\n if not self.native_amp:\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\n- if not torch.cuda.is_available():\n+ if not torch.cuda.is_available() and not parse_flag_from_env(\"USE_MPS_DEVICE\"):\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n if self.distributed_type == DistributedType.FSDP:\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex ba677e3eb..1ba5839e2 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -28,9 +28,9 @@\n \n def get_cluster_input():\n distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): \",\n+ \"Which type of machine are you using? 
([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU [4] MPS): \",\n _convert_distributed_mode,\n- error_message=\"Please enter 0, 1, 2 or 3.\",\n+ error_message=\"Please enter 0, 1, 2, 3 or 4.\",\n )\n \n machine_rank = 0\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nindex fe72be267..9dd1f4c99 100644\n--- a/src/accelerate/commands/config/config_utils.py\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -37,7 +37,7 @@ def _convert_compute_environment(value):\n \n def _convert_distributed_mode(value):\n value = int(value)\n- return DistributedType([\"NO\", \"MULTI_CPU\", \"MULTI_GPU\", \"TPU\"][value])\n+ return DistributedType([\"NO\", \"MULTI_CPU\", \"MULTI_GPU\", \"TPU\", \"MPS\"][value])\n \n \n def _convert_sagemaker_distributed_mode(value):\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 599e710f0..863f903c1 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -63,6 +63,12 @@ def launch_command_parser(subparsers=None):\n action=\"store_true\",\n help=\"Whether or not this should launch a distributed GPU training.\",\n )\n+ parser.add_argument(\n+ \"--use_mps_device\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether or not this should use MPS-enabled GPU device on MacOS machines.\",\n+ )\n parser.add_argument(\n \"--use_deepspeed\",\n default=False,\n@@ -320,6 +326,7 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\n if args.num_machines > 1:\n current_env[\"MASTER_ADDR\"] = args.main_process_ip\n current_env[\"MASTER_PORT\"] = str(args.main_process_port)\n@@ -752,11 +759,18 @@ def launch_command(args):\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n- if not args.multi_gpu and not args.tpu and not args.use_deepspeed and not args.use_fsdp:\n+ if (\n+ not args.multi_gpu\n+ and not args.tpu\n+ and not args.use_deepspeed\n+ and not args.use_fsdp\n+ and not args.use_mps_device\n+ ):\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n+ args.use_mps_device = defaults.distributed_type == DistributedType.MPS\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n # Update args with the defaults\n for name, attr in defaults.__dict__.items():\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex a4f6bac2a..bd339048f 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -206,7 +206,26 @@ def __init__(\n self.distributed_type = DistributedType.NO\n self.num_processes = 1\n self.process_index = self.local_process_index = 0\n- self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\n+ if parse_flag_from_env(\"USE_MPS_DEVICE\") and not cpu:\n+ if not torch.backends.mps.is_available():\n+ if not torch.backends.mps.is_built():\n+ raise AssertionError(\n+ \"MPS not available because the current PyTorch install was not \"\n+ \"built with MPS enabled. 
Please install torch version >=1.12.0 on \"\n+ \"your Apple silicon Mac running macOS 12.3 or later with a native \"\n+ \"version (arm64) of Python\"\n+ )\n+ else:\n+ raise AssertionError(\n+ \"MPS not available because the current MacOS version is not 12.3+ \"\n+ \"and/or you do not have an MPS-enabled device on this machine.\"\n+ )\n+ else:\n+ self.device = torch.device(\"mps\")\n+ elif cpu or not torch.cuda.is_available():\n+ self.device = torch.device(\"cpu\")\n+ else:\n+ self.device = torch.device(\"cuda\")\n self.mixed_precision = mixed_precision\n self.initialized = True\n \ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 350d95320..50c80adb1 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -123,6 +123,7 @@ class DistributedType(str, enum.Enum):\n DEEPSPEED = \"DEEPSPEED\"\n FSDP = \"FSDP\"\n TPU = \"TPU\"\n+ MPS = \"MPS\"\n \n \n class SageMakerDistributedType(str, enum.Enum):\n", "code_comments": [ { "body": "Can we do something similar to this [here](https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/testing.py#L94-L99) so that cuda tests can also be ran? As so far tests are only running on the CPU here πŸ˜„ ", "diff_hunk": "@@ -272,7 +272,7 @@ def __init__(\n self.native_amp = is_torch_version(\">=\", \"1.6\")\n if not self.native_amp:\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\n- if not torch.cuda.is_available():\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":", "from_author": false }, { "body": "There might be other truthy values we're not catching here, can se use something like `parse_flag_from_env`?", "diff_hunk": "@@ -272,7 +272,7 @@ def __init__(\n self.native_amp = is_torch_version(\">=\", \"1.6\")\n if not self.native_amp:\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\n- if not torch.cuda.is_available():\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":", "from_author": false }, { "body": "```suggestion\r\n current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device)\r\n```\r\nWe can't rely on users always setting lowercased values, so we should handle any casing.", "diff_hunk": "@@ -320,6 +326,7 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device).lower()", "from_author": false }, { "body": "Give instructions here maybe?", "diff_hunk": "@@ -208,7 +208,24 @@ def __init__(\n self.distributed_type = DistributedType.NO\n self.num_processes = 1\n self.process_index = self.local_process_index = 0\n- self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\n+ if os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"true\" and not cpu:\n+ if not torch.backends.mps.is_available():\n+ if not torch.backends.mps.is_built():\n+ raise AssertionError(\n+ \"MPS not available because the current PyTorch install was not \"\n+ \"built with MPS enabled.\"", "from_author": false }, { "body": "Hello, `use_mps_device` is a boolean argument using argparse action, so I think this should be fine.", "diff_hunk": "@@ -320,6 +326,7 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device).lower()", "from_author": true }, { 
"body": "Done.", "diff_hunk": "@@ -208,7 +208,24 @@ def __init__(\n self.distributed_type = DistributedType.NO\n self.num_processes = 1\n self.process_index = self.local_process_index = 0\n- self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\n+ if os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"true\" and not cpu:\n+ if not torch.backends.mps.is_available():\n+ if not torch.backends.mps.is_built():\n+ raise AssertionError(\n+ \"MPS not available because the current PyTorch install was not \"\n+ \"built with MPS enabled.\"", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -320,6 +326,7 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\n+ current_env[\"USE_MPS_DEVICE\"] = str(args.use_mps_device).lower()", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -272,7 +272,7 @@ def __init__(\n self.native_amp = is_torch_version(\">=\", \"1.6\")\n if not self.native_amp:\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\n- if not torch.cuda.is_available():\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":", "from_author": true }, { "body": "Hello, I didn't understand. Let's take this offline to better understand.", "diff_hunk": "@@ -272,7 +272,7 @@ def __init__(\n self.native_amp = is_torch_version(\">=\", \"1.6\")\n if not self.native_amp:\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\n- if not torch.cuda.is_available():\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":", "from_author": true }, { "body": "Specify what specific question related to MPS needs to be answered here :) ", "diff_hunk": "@@ -0,0 +1,93 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this. \n+New device `mps` maps computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer Official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```\n+\n+## How it works out of the box\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. This will generate a config file that will be used automatically to properly set the\n+default options when doing", "from_author": false }, { "body": "Resolving based on Sylvains note about the tests", "diff_hunk": "@@ -272,7 +272,7 @@ def __init__(\n self.native_amp = is_torch_version(\">=\", \"1.6\")\n if not self.native_amp:\n raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\n- if not torch.cuda.is_available():\n+ if not torch.cuda.is_available() and os.environ.get(\"USE_MPS_DEVICE\", \"false\") == \"false\":", "from_author": false }, { "body": "Add a line under each header, I believe this is why the URLs aren't showing correctly and why everything's formatting seems to be broken: https://moon-ci-docs.huggingface.co/docs/accelerate/pr_596/en/usage_guides/mps#accelerated-pytorch-training-on-mac", "diff_hunk": "@@ -0,0 +1,93 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. ", "from_author": false }, { "body": "```suggestion\r\nApple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. 
This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\r\n```", "diff_hunk": "@@ -0,0 +1,93 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this. \n+New device `mps` maps computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -0,0 +1,93 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. ", "from_author": true }, { "body": "Done\r\n", "diff_hunk": "@@ -0,0 +1,93 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this. 
\n+New device `mps` maps computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer Official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```\n+\n+## How it works out of the box\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. This will generate a config file that will be used automatically to properly set the\n+default options when doing", "from_author": true }, { "body": "We can break this up a bit to make it more digestible/readable + syntax highlighting\r\n\r\n```suggestion\r\n```python\r\n# installing torch with m1 support on mac\r\n# install latest python from https://www.python.org/downloads/release/python-3106/\r\n# check the platform\r\nimport platform\r\nplatform.platform()\r\n```\r\n```python out\r\n'macOS-12.5-arm64-arm-64bit' \r\n```\r\n```python\r\n# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\r\n# install torch 1.12 via the below command\r\n# pip3 install torch torchvision torchaudio\r\n# test the `mps` device support\r\nimport torch\r\ntorch.has_mps\r\n```\r\n```python out\r\nTrue\r\n```\r\n```python\r\na = torch.Tensor([10,11])\r\na.to(\"mps\")\r\n```\r\n```python out\r\n/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. 
(Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\r\n nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\r\ntensor([10.0000, 11.0000], device='mps:0')\r\n```\r\n```", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. 
(Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```", "from_author": false }, { "body": "I can't make a suggestion for this one, but I recommend something like this:\r\n\r\nand answer the questions asked, specifically choose `MPS` for the query:\r\n```\r\nWhich type of machine are you using?\r\n```\r\nThis will generate a config file that will be used automatically to properly set the\r\ndefault options when doing", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```\n+\n+## How it works out of the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked, specifically choose `MPS` for the query `Which type of machine are you using?` . \n+This will generate a config file that will be used automatically to properly set the\n+default options when doing", "from_author": false }, { "body": "Move this section to after you've ran `accelerate config` and state that this is what the generated configuration looks like", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. 
Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```\n+\n+## How it works out of the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked, specifically choose `MPS` for the query `Which type of machine are you using?` . \n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the CV example (from the root of the repo) with MPS enabled:\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MPS\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n+```", "from_author": false }, { "body": "I feel the above block is fine as it might be too distracting to have multiple blocks which can break the flow when the user is reading ", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```", "from_author": true }, { "body": "But then it would be breaking the flow of commands. I have followed generic commands `accelerate config` and `accelerate launch script.py args` with specific example so that user can quickly get the gist. ", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```\n+\n+## How it works out of the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked, specifically choose `MPS` for the query `Which type of machine are you using?` . 
\n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the CV example (from the root of the repo) with MPS enabled:\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MPS\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n+```", "from_author": true }, { "body": "If you're using comments inside a code-block in the documentation, you are doing something wrong; you can put the comments as sentences **then** enter a code-block. Also you should use the doctest syntax to highlight the inputs and outputs, as it's not clear otherwise.", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. 
Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```", "from_author": false }, { "body": "I'd probably change it to be:\r\n\r\n```\r\nThis will generate a config file that will be used automatically to properly set the default options when doing `accelerate launch`, such as the one shown below:\r\n```\r\nAnd just don't show the full `accelerate launch` snippet, since here we want to focus on the config not the launch.\r\n\r\nThe next part would then say:\r\n```\r\nAfter this configuration has been made, here is how you run the CV example (from the root of the repo) with MPS enabled:\r\n```\r\n\r\nThis way each part is focused on what matters to that section", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. 
\n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```\n+\n+## How it works out of the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked, specifically choose `MPS` for the query `Which type of machine are you using?` . \n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the CV example (from the root of the repo) with MPS enabled:\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MPS\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n+```", "from_author": false }, { "body": "Oh, okay, then maybe I can just leave it at the link to the medium blogpost instead of summarizing it this way. ", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```", "from_author": true }, { "body": "Done. Removed this entire block and just left it with a link to the medium blogpost explaining this in detail. ", "diff_hunk": "@@ -0,0 +1,100 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+### Pre-requisites: Installing torch with mps support\n+\n+Following this great medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1),\n+below are the steps to make sure torch is installed with `mps` support.\n+\n+```\n+# installing torch with m1 support on mac\n+# install latest python from https://www.python.org/downloads/release/python-3106/\n+# check the platform\n+import platform\n+platform.platform()\n+'macOS-12.5-arm64-arm-64bit' \n+# This is compatible as the macOS version is above 12.3 and it is the ARM64 native version of Python\n+# install torch 1.12 via the below command\n+# pip3 install torch torchvision torchaudio\n+# test the `mps` device support\n+import torch\n+torch.has_mps\n+True\n+a = torch.Tensor([10,11])\n+a.to(\"mps\")\n+/Users/mac/ml/lib/python3.10/site-packages/torch/_tensor_str.py:103: UserWarning: The operator 'aten::bitwise_and.Tensor_out' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/mps/MPSFallback.mm:11.)\n+ nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))\n+tensor([10.0000, 11.0000], device='mps:0')\n+```", "from_author": true }, { "body": "This should be rewritten or removed now that the PyTorch issue has been solved (in nightly at least), no?", "diff_hunk": "@@ -0,0 +1,81 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+**Pre-requisites**: To install torch with mps support, \n+please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1).\n+\n+\n+## How it works out of the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked, specifically choose `MPS` for the query:\n+\n+```\n+ Which type of machine are you using?. \n+ ```\n+\n+This will generate a config file that will be used automatically to properly set \n+the default options when doing `accelerate launch`, such as the one shown below:\n+\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MPS\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n+```\n+\n+After this configuration has been made, here is how you run the CV example \n+(from the root of the repo) with MPS enabled:\n+\n+```bash\n+accelerate launch /examples/cv_example.py --data_dir images\n+```\n+\n+## A few caveats to be aware of\n+\n+1. For `nlp_example.py` the metrics are too bad when compared to CPU-only training. \n+This means certain operations in BERT model are going wrong using mps device and this needs to be fixed by PyTorch.", "from_author": false }, { "body": "Done. Please refer this PR #629 ", "diff_hunk": "@@ -0,0 +1,81 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerated PyTorch Training on Mac\n+\n+With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. \n+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.\n+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `\"mps\"` device. \n+This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS.\n+For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)\n+and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html).\n+\n+### Benefits of Training and Inference using Apple M1 Chips\n+\n+1. Enables users to train larger networks or batch sizes locally\n+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. \n+Therefore, improving end-to-end performance.\n+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.\n+\n+**Pre-requisites**: To install torch with mps support, \n+please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1).\n+\n+\n+## How it works out of the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked, specifically choose `MPS` for the query:\n+\n+```\n+ Which type of machine are you using?. \n+ ```\n+\n+This will generate a config file that will be used automatically to properly set \n+the default options when doing `accelerate launch`, such as the one shown below:\n+\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MPS\n+downcast_bf16: 'no'\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n+```\n+\n+After this configuration has been made, here is how you run the CV example \n+(from the root of the repo) with MPS enabled:\n+\n+```bash\n+accelerate launch /examples/cv_example.py --data_dir images\n+```\n+\n+## A few caveats to be aware of\n+\n+1. For `nlp_example.py` the metrics are too bad when compared to CPU-only training. 
\n+This means certain operations in BERT model are going wrong using mps device and this needs to be fixed by PyTorch.", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "`notebook_launcher` doesn't seem to be modified in order to utilize this change that supports MPS device.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/596", "pr_id": 1015126898 }, { "diff": "diff --git a/setup.py b/setup.py\nindex a59d1ecaf..ec015f276 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\", \"hf-doc-builder >= 0.3.0\"]\n extras[\"docs\"] = []\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\n-extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed\", \"tqdm\"]\n+extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed<0.7.0\", \"tqdm\"]\n extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\n \n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/595", "pr_id": 1015083703 }, { "diff": "diff --git a/docs/source/usage_guides/tracking.mdx b/docs/source/usage_guides/tracking.mdx\nindex 2fb68c0f2..7c61b21e9 100644\n--- a/docs/source/usage_guides/tracking.mdx\n+++ b/docs/source/usage_guides/tracking.mdx\n@@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.\n # Tracking\n \n There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex.\n-πŸ€— Accelerate provides a general tracking API that can be used to log useful items during your script through [`~Accelerator.log`]\n+πŸ€— Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`]\n \n ## Integrated Trackers\n \n@@ -33,19 +33,19 @@ accelerator = Accelerator(log_with=\"wandb\")\n accelerator = Accelerator(log_with=[\"wandb\", LoggerType.TENSORBOARD])\n ```\n \n-At the start of your experiment [`~Accelerator.init_trackers`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged:\n+At the start of your experiment [`Accelerator.init_trackers`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged:\n ```python\n hps = {\"num_iterations\": 5, \"learning_rate\": 1e-2}\n accelerator.init_trackers(\"my_project\", config=hps)\n ```\n \n-When you are ready to log any data, [`~Accelerator.log`] should be used.\n+When you are ready to log any data, [`Accelerator.log`] should be used.\n A `step` can also be passed in to correlate the data with a particular step in the training loop.\n ```python\n accelerator.log({\"train_loss\": 1.12, \"valid_loss\": 0.8}, step=1)\n ```\n \n-Once you've finished training, make sure to run [`~Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any.\n+Once you've finished training, make sure to run [`Accelerator.end_training`] so that all the trackers can run 
their finish functionalities if they have any.\n ```python\n accelerator.end_training()\n ```\n@@ -85,8 +85,8 @@ accelerator.end_training()\n \n ## Implementing Custom Trackers\n \n-To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`~GeneralTracker`] class.\n-Every tracker must implement three functions:\n+To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`GeneralTracker`] class.\n+Every tracker must implement three functions and have three properties:\n - `__init__`: \n - Should store a `run_name` and initialize the tracker API of the integrated library. \n - If a tracker stores their data locally (such as TensorBoard), a `logging_dir` parameter can be added.\n@@ -95,6 +95,15 @@ Every tracker must implement three functions:\n - `log`: \n - Should take in a `values` dictionary and a `step`, and should log them to the run\n \n+ - `name` (`str`):\n+ - A unique string name for the tracker, such as `\"wandb\"` for the wandb tracker. \n+ - This will be used for interacting with this tracker specifically\n+ - `requires_logging_directory` (`bool`):\n+ - Whether a `logging_dir` is needed for this particular tracker and if it uses one.\n+ - `tracker`: \n+ - This should be implemented as a `@property` function \n+ - Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`.\n+\n A brief example can be seen below with an integration with Weights and Biases, containing only the relevent information:\n ```python\n from accelerate.tracking import GeneralTracker\n@@ -109,7 +118,11 @@ class MyCustomTracker(GeneralTracker):\n \n def __init__(self, run_name: str):\n self.run_name = run_name\n- wandb.init(self.run_name)\n+ run = wandb.init(self.run_name)\n+\n+ @property\n+ def tracker(self):\n+ return self.run.run\n \n def store_init_configuration(self, values: dict):\n wandb.config(values)\n@@ -118,7 +131,7 @@ class MyCustomTracker(GeneralTracker):\n wandb.log(values, step=step)\n ```\n \n-When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`~Accelerator.log_with`] to have it automatically\n+When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`Accelerator.log_with`] to have it automatically\n be used with the API:\n \n ```python\n@@ -133,6 +146,30 @@ tracker = MyCustomTracker(\"some_run_name\")\n accelerator = Accelerator(log_with=[tracker, \"all\"])\n ```\n \n+## Accessing the internal tracker \n+\n+If some custom interactions with a tracker might be wanted directly, you can quickly access one using the \n+[`Accelerator.get_tracker`] method. 
Just pass in the string corresponding to a tracker's `.name` attribute \n+and it will return that tracker on the main process.\n+\n+This example shows doing so with wandb:\n+\n+```python\n+wandb_tracker = accelerator.get_tracker(\"wandb\")\n+```\n+\n+From there you can interact with `wandb`'s `run` object like normal:\n+\n+<Tip warning={true}>\n+ Make sure to only interact with trackers on the main process!\n+</Tip>\n+\n+\n+```python\n+if accelerator.is_main_process:\n+ wandb_run.log_artifact(some_artifact_to_log)\n+```\n+\n ## When a wrapper cannot work\n \n If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 093d4c00f..bbd8db435 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -1069,10 +1069,24 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None, init_k\n for tracker in self.trackers:\n tracker.store_init_configuration(config)\n \n+ @on_main_process\n+ def get_tracker(self, name: str):\n+ \"\"\"\n+ Returns a `tracker` from `self.trackers` based on `name` on the main process only.\n+\n+ Args:\n+ name (`str`):\n+ The name of a tracker, corresponding to the `.name` property.\n+ \"\"\"\n+ for tracker in self.trackers:\n+ if tracker.name == name:\n+ return tracker.tracker\n+ raise ValueError(f\"{name} is not an available tracker stored inside the `Accelerator`.\")\n+\n @on_main_process\n def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dict] = {}):\n \"\"\"\n- Logs `values` to all stored trackers in `self.trackers`.\n+ Logs `values` to all stored trackers in `self.trackers` on the main process only.\n \n Args:\n values (`dict`):\n@@ -1092,7 +1106,7 @@ def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dic\n @on_main_process\n def end_training(self):\n \"\"\"\n- Runs any special end training behaviors, such as stopping trackers\n+ Runs any special end training behaviors, such as stopping trackers on the main process only.\n \"\"\"\n for tracker in self.trackers:\n tracker.finish()\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex b89630a4e..673975fc7 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -103,6 +103,13 @@ def finish(self):\n \"\"\"\n pass\n \n+ @abstractproperty\n+ def tracker(self):\n+ \"\"\"\n+ Should return internal tracking mechanism used by a tracker class (such as the `run` for wandb)\n+ \"\"\"\n+ pass\n+\n \n class TensorBoardTracker(GeneralTracker):\n \"\"\"\n@@ -129,6 +136,10 @@ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]]\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n \n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n@@ -196,6 +207,10 @@ def __init__(self, run_name: str, **kwargs):\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n \n+ @property\n+ def tracker(self):\n+ return self.run.run\n+\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment.\n@@ -256,6 +271,10 @@ def __init__(self, run_name: str, **kwargs):\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n \n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex c32502796..917ee329d 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -224,6 +224,10 @@ def __init__(self, dir: str):\n self.writer = csv.DictWriter(self.f, fieldnames=self._col_names)\n self.writer.writeheader()\n \n+ @property\n+ def tracker(self):\n+ return self.writer\n+\n def store_init_configuration(self, values: dict):\n logger.info(\"Call init\")\n self.writer.writerow(values)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/594", "pr_id": 1015050329 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex c2fd566b7..093d4c00f 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -412,7 +412,7 @@ def wrapper(self, *args, **kwargs):\n \n def on_local_process(local_process_idx):\n \"\"\"\n- Run func on certain local process only\n+ A decorator that will run the decorated function on a given local process index only.\n \"\"\"\n \n def decorator(func):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/591", "pr_id": 1014321308 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 1b462a1f4..bffd843d8 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -236,23 +236,13 @@ def training_function(config, args):\n accelerator.save_state(output_dir)\n \n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True` (the default).\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex 6dd61bbf8..707c93c4c 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -203,23 +203,13 @@ def training_function(config, args):\n 
optimizer.zero_grad()\n \n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex a279e64eb..274ccdfce 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -274,23 +274,13 @@ def collate_fn(examples):\n # context manager to track the peak memory usage during the evaluation\n with TorchTracemalloc() as tracemalloc:\n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex 69856c97b..170a885e3 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -170,23 +170,13 @@ def training_function(config, args):\n optimizer.zero_grad()\n \n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- 
references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 91f3e41d1..9f0d55c69 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -182,23 +182,13 @@ def inner_training_loop(batch_size):\n optimizer.zero_grad()\n \n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex 522cc571b..cb9534c4a 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -192,6 +192,8 @@ def training_function(config, args):\n else:\n # Otherwise we add the number of samples seen\n samples_seen += references.shape[0]\n+ # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:\n+ # accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex d7248682d..76ad7a64b 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -194,23 +194,13 @@ def training_function(config, args):\n optimizer.zero_grad()\n \n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True` (the default).\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n 
metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex f984406bf..99d109bd1 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -232,7 +232,6 @@ def training_function(config, args):\n accelerator.save_state(output_dir)\n model.eval()\n accurate = 0\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n@@ -240,16 +239,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"label\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\n accurate_preds = predictions == references\n accurate += accurate_preds.long().sum()\n \ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 000da6038..094f6cfb5 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -212,23 +212,13 @@ def collate_fn(examples):\n accelerator.save_state(output_dir)\n \n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex 875b68398..1118a2f0e 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -166,7 +166,6 @@ def training_function(config, args):\n model.eval()\n accurate = 0\n num_elems = 0\n- samples_seen = 0\n for _, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n@@ -174,16 +173,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = 
accelerator.gather((predictions, batch[\"label\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\n accurate_preds = predictions == references\n num_elems += accurate_preds.shape[0]\n accurate += accurate_preds.long().sum()\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex b1e7cba27..a126b5dd5 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -154,23 +154,13 @@ def training_function(config, args):\n optimizer.zero_grad()\n \n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- # Last batch needs to be truncated on distributed systems as it contains additional samples\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- # Otherwise we add the number of samples seen\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex bbd8db435..ef421a22b 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -956,9 +956,6 @@ def gather_for_metrics(self, tensor):\n tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):\n The tensors for calculating metrics across all processes.\n \"\"\"\n- raise NotImplementedError(\n- \"Currently there are a number of bugs with this method. 
You should use `Accelerator.gather()` and drop the samples yourself for the time being.\"\n- )\n tensor = self.gather(tensor)\n if self.use_distributed:\n if self.gradient_state.remainder == -1:\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 4cedd8f1a..b8152b302 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -470,11 +470,6 @@ def _fetch_batches(self, iterator):\n \n def __iter__(self):\n self.gradient_state._set_end_of_dataloader(False)\n- try:\n- length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n- self.gradient_state._set_remainder(length % self.batch_size)\n- except:\n- self.gradient_state._set_remainder(-1)\n main_iterator = None\n if self.state.process_index == 0:\n # We only iterate through the DataLoader on process 0.\n@@ -501,7 +496,16 @@ def __iter__(self):\n observed_batch_size = find_batch_size(batch)\n batch_size = observed_batch_size // self.state.num_processes\n \n- if not self._drop_last and self._stop_iteration and observed_batch_size % self.state.num_processes != 0:\n+ stop_iteration = self._stop_iteration\n+ if not stop_iteration:\n+ # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in\n+ # the dataloader since the number of batches is a round multiple of the number of processes.\n+ next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n+ # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.\n+ if self._stop_iteration and next_batch_info[0] is None:\n+ stop_iteration = True\n+\n+ if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:\n # If the last batch is not complete, let's add the first batch to it.\n batch = concatenate([batch, first_batch], dim=0)\n # Batch size computation above is wrong, it's off by 1 so we fix it.\n@@ -510,16 +514,8 @@ def __iter__(self):\n data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)\n batch = slice_tensors(batch, data_slice)\n \n- stop_iteration = self._stop_iteration\n- if not stop_iteration:\n- # We may still be at the end of the dataloader without knowing it yet: if there is nothing left\n- # because by change the dataset had a round multiple of samples.\n- next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n- # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.\n- if self._stop_iteration and next_batch_info[0] is None:\n- stop_iteration = True\n-\n if stop_iteration:\n+ self.gradient_state._set_remainder(observed_batch_size)\n self.gradient_state._set_end_of_dataloader(True)\n yield batch\n \ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex 382bf81bf..43d5ed7ef 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -7,6 +7,7 @@\n execute_subprocess_async,\n require_cpu,\n require_cuda,\n+ require_huggingface_suite,\n require_multi_gpu,\n require_single_gpu,\n require_tpu,\n@@ -16,4 +17,4 @@\n from .training import RegressionDataset, RegressionModel\n \n \n-from .scripts import test_metrics, test_script, test_sync # isort:skip\n+from .scripts import test_script, test_sync # isort:skip\ndiff --git a/src/accelerate/test_utils/scripts/external_deps/__init__.py b/src/accelerate/test_utils/scripts/external_deps/__init__.py\nnew file mode 100644\nindex 
000000000..e69de29bb\ndiff --git a/src/accelerate/test_utils/scripts/external_deps/test_metrics.py b/src/accelerate/test_utils/scripts/external_deps/test_metrics.py\nnew file mode 100755\nindex 000000000..d234ee14a\n--- /dev/null\n+++ b/src/accelerate/test_utils/scripts/external_deps/test_metrics.py\n@@ -0,0 +1,170 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import math\n+from copy import deepcopy\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+import datasets\n+import evaluate\n+import transformers\n+from accelerate import Accelerator\n+from accelerate.test_utils import RegressionDataset, RegressionModel\n+from accelerate.utils import is_tpu_available, set_seed\n+from datasets import load_dataset\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer\n+\n+\n+def get_basic_setup(accelerator, num_samples=82):\n+ \"Returns everything needed to perform basic training\"\n+ set_seed(42)\n+ model = RegressionModel()\n+ ddp_model = deepcopy(model)\n+ dset = RegressionDataset(length=num_samples)\n+ dataloader = DataLoader(dset, batch_size=16)\n+ model.to(accelerator.device)\n+ ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)\n+ return model, ddp_model, dataloader\n+\n+\n+def get_dataloader(accelerator: Accelerator, use_longest=False):\n+ tokenizer = AutoTokenizer.from_pretrained(\"hf-internal-testing/mrpc-bert-base-cased\")\n+ dataset = load_dataset(\"glue\", \"mrpc\", split=\"validation\")\n+\n+ def tokenize_function(examples):\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ with accelerator.main_process_first():\n+ tokenized_datasets = dataset.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ if use_longest:\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+\n+ return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)\n+\n+\n+def get_mrpc_setup(dispatch_batches, split_batches):\n+ accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)\n+ dataloader = get_dataloader(accelerator, not dispatch_batches)\n+ model = AutoModelForSequenceClassification.from_pretrained(\n+ \"hf-internal-testing/mrpc-bert-base-cased\", return_dict=True\n+ )\n+ ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)\n+ return {\"ddp\": [ddp_model, ddp_dataloader, \"cuda:0\"], \"no\": [model, dataloader, accelerator.device]}, accelerator\n+\n+\n+def generate_predictions(model, dataloader, accelerator):\n+ logits_and_targets = []\n+ for batch in dataloader:\n+ input, target = batch.values()\n+ with torch.no_grad():\n+ logit = 
model(input)\n+ logit, target = accelerator.gather_for_metrics((logit, target))\n+ logits_and_targets.append((logit, target))\n+ logits, targs = [], []\n+ for (logit, targ) in logits_and_targets:\n+ logits.append(logit)\n+ targs.append(targ)\n+ logits, targs = torch.cat(logits), torch.cat(targs)\n+ return logits, targs\n+\n+\n+def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False):\n+ model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples)\n+ logits, targs = generate_predictions(ddp_model, dataloader, accelerator)\n+ assert (\n+ len(logits) == num_samples\n+ ), f\"Unexpected number of inputs:\\n Expected: {num_samples}\\n Actual: {len(logits)}\"\n+\n+\n+def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n+ setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)\n+ # First do baseline\n+ model, dataloader, device = setup[\"no\"]\n+ model.to(device)\n+ model.eval()\n+ for batch in dataloader:\n+ batch.to(device)\n+ with torch.inference_mode():\n+ outputs = model(**batch)\n+ preds = outputs.logits.argmax(dim=-1)\n+ metric.add_batch(predictions=preds, references=batch[\"labels\"])\n+ baseline = metric.compute()\n+\n+ # Then do distributed\n+ model, dataloader, device = setup[\"ddp\"]\n+ model.eval()\n+ for batch in dataloader:\n+ with torch.inference_mode():\n+ outputs = model(**batch)\n+ preds = outputs.logits.argmax(dim=-1)\n+ references = batch[\"labels\"]\n+ preds, references = accelerator.gather_for_metrics((preds, references))\n+ metric.add_batch(predictions=preds, references=references)\n+ distributed = metric.compute()\n+\n+ for key in \"accuracy f1\".split():\n+ assert math.isclose(\n+ baseline[key], distributed[key]\n+ ), f\"Baseline and Distributed are not the same for key {key}:\\n\\tBaseline: {baseline[key]}\\n\\tDistributed: {distributed[key]}\\n\"\n+\n+\n+def main():\n+ accelerator = Accelerator(split_batches=False, dispatch_batches=False)\n+ if accelerator.is_local_main_process:\n+ datasets.utils.logging.set_verbosity_warning()\n+ transformers.utils.logging.set_verbosity_warning()\n+ else:\n+ datasets.utils.logging.set_verbosity_error()\n+ transformers.utils.logging.set_verbosity_error()\n+ # These are a bit slower so they should only be ran on the GPU or TPU\n+ if torch.cuda.is_available() or is_tpu_available():\n+ if accelerator.is_local_main_process:\n+ print(\"**Testing gather_for_metrics**\")\n+ for split_batches in [True, False]:\n+ for dispatch_batches in [True, False]:\n+ if accelerator.is_local_main_process:\n+ print(f\"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`\")\n+ test_mrpc(dispatch_batches, split_batches)\n+ accelerator.state._reset_state()\n+ if accelerator.is_local_main_process:\n+ print(\"**Test torch metrics**\")\n+ for split_batches in [True, False]:\n+ for dispatch_batches in [True, False]:\n+ accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)\n+ if accelerator.is_local_main_process:\n+ print(f\"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99\")\n+ test_torch_metrics(accelerator, 99)\n+ accelerator.state._reset_state()\n+\n+\n+def _mp_fn(index):\n+ # For xla_spawn (TPUs)\n+ main()\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/test_utils/scripts/test_metrics.py b/src/accelerate/test_utils/scripts/test_metrics.py\ndeleted file mode 100644\nindex 
8f057ccc5..000000000\n--- a/src/accelerate/test_utils/scripts/test_metrics.py\n+++ /dev/null\n@@ -1,90 +0,0 @@\n-# Copyright 2022 The HuggingFace Team. All rights reserved.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-from copy import deepcopy\n-\n-import torch\n-from torch.utils.data import DataLoader\n-\n-from accelerate import Accelerator\n-from accelerate.test_utils import RegressionDataset, RegressionModel\n-from accelerate.utils import set_seed\n-\n-\n-def get_setup(accelerator, num_samples=82):\n- \"Returns everything needed to perform basic training\"\n- set_seed(42)\n- model = RegressionModel()\n- ddp_model = deepcopy(model)\n- dset = RegressionDataset(length=num_samples)\n- dataloader = DataLoader(dset, batch_size=16)\n- model.to(accelerator.device)\n- ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)\n- return model, ddp_model, dataloader\n-\n-\n-def generate_predictions(model, dataloader, accelerator):\n- logits_and_targets = []\n- for batch in dataloader:\n- input, target = batch.values()\n- with torch.no_grad():\n- logits = model(input)\n- logits, target = accelerator.gather_for_metrics((logits, target))\n- logits_and_targets.append((logits, target))\n- inps, targs = [], []\n- for (inp, targ) in logits_and_targets:\n- inps.append(inp)\n- targs.append(targ)\n- inps, targs = torch.cat(inps), torch.cat(targs)\n- return inps, targs\n-\n-\n-def test_torch_metrics(accelerator: Accelerator, num_samples=82):\n- model, ddp_model, dataloader = get_setup(accelerator, num_samples)\n- inps, targs = generate_predictions(ddp_model, dataloader, accelerator)\n- assert (\n- len(inps) == num_samples\n- ), f\"Unexpected number of inputs:\\n Expected: {num_samples}\\n Actual: {len(inps)}\"\n-\n-\n-def main():\n- accelerator = Accelerator(split_batches=False, dispatch_batches=False)\n- if accelerator.is_local_main_process:\n- print(\"**Test torch metrics**\")\n- print(\"With: `split_batches=False`, `dispatch_batches=False`\")\n- test_torch_metrics(accelerator)\n- accelerator.state._reset_state()\n- accelerator = Accelerator(split_batches=True, dispatch_batches=False)\n- if accelerator.is_local_main_process:\n- print(\"With: `split_batches=True`, `dispatch_batches=False`\")\n- test_torch_metrics(accelerator)\n- accelerator.state._reset_state()\n- accelerator = Accelerator(split_batches=False, dispatch_batches=True)\n- if accelerator.is_local_main_process:\n- print(\"With: `split_batches=False`, `dispatch_batches=True`\")\n- test_torch_metrics(accelerator)\n- accelerator.state._reset_state()\n- accelerator = Accelerator(split_batches=True, dispatch_batches=True)\n- if accelerator.is_local_main_process:\n- print(\"With: `split_batches=True`, `dispatch_batches=True`\")\n- test_torch_metrics(accelerator)\n-\n-\n-def _mp_fn(index):\n- # For xla_spawn (TPUs)\n- main()\n-\n-\n-if __name__ == \"__main__\":\n- main()\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 145cc80b2..381f92c58 100644\n--- 
a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -30,10 +30,12 @@\n from ..utils import (\n gather,\n is_comet_ml_available,\n+ is_datasets_available,\n is_deepspeed_available,\n is_tensorboard_available,\n is_torch_version,\n is_tpu_available,\n+ is_transformers_available,\n is_wandb_available,\n )\n \n@@ -84,6 +86,15 @@ def require_cuda(test_case):\n return unittest.skipUnless(torch.cuda.is_available(), \"test requires a GPU\")(test_case)\n \n \n+def require_huggingface_suite(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not.\n+ \"\"\"\n+ return unittest.skipUnless(\n+ is_transformers_available() and is_datasets_available(), \"test requires the Hugging Face suite\"\n+ )(test_case)\n+\n+\n def require_tpu(test_case):\n \"\"\"\n Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex d1a015b93..db21afc3a 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -25,6 +25,7 @@\n is_boto3_available,\n is_ccl_available,\n is_comet_ml_available,\n+ is_datasets_available,\n is_deepspeed_available,\n is_sagemaker_available,\n is_tensorboard_available,\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex bc4e3b4cd..074d02e4a 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -89,6 +89,10 @@ def is_transformers_available():\n return importlib.util.find_spec(\"transformers\") is not None\n \n \n+def is_datasets_available():\n+ return importlib.util.find_spec(\"datasets\") is not None\n+\n+\n def is_tensorboard_available():\n return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\n \ndiff --git a/tests/test_metrics.py b/tests/test_metrics.py\nindex 7e42c793e..b26ef00b2 100644\n--- a/tests/test_metrics.py\n+++ b/tests/test_metrics.py\n@@ -23,31 +23,36 @@\n from accelerate.test_utils import (\n execute_subprocess_async,\n require_cpu,\n+ require_huggingface_suite,\n require_multi_gpu,\n require_single_gpu,\n- skip,\n- test_metrics,\n )\n from accelerate.utils import get_launch_prefix, patch_environment\n \n \n-@skip\n+@require_huggingface_suite\n class MetricTester(unittest.TestCase):\n def setUp(self):\n mod_file = inspect.getfile(accelerate.test_utils)\n- self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_metrics.py\"])\n+ self.test_file_path = os.path.sep.join(\n+ mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"external_deps\", \"test_metrics.py\"]\n+ )\n+\n+ from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401\n+\n+ self.test_metrics = test_metrics\n \n @require_cpu\n def test_metric_cpu_noop(self):\n- debug_launcher(test_metrics.main, num_processes=1)\n+ debug_launcher(self.test_metrics.main, num_processes=1)\n \n @require_cpu\n def test_metric_cpu_multi(self):\n- debug_launcher(test_metrics.main)\n+ debug_launcher(self.test_metrics.main)\n \n @require_single_gpu\n def test_metric_gpu(self):\n- test_metrics.main()\n+ self.test_metrics.main()\n \n @require_multi_gpu\n def test_metric_gpu_multi(self):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": 
"https://api.github.com/repos/huggingface/accelerate/pulls/590", "pr_id": 1014316178 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex a0278218b..806f0094d 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -1,6 +1,6 @@\n name: Run Tests\n \n-# on: [pull_request]\n+on: [pull_request]\n \n env:\n HF_HOME: ~/hf_cache\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/589", "pr_id": 1014275205 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 531f8eff9..4cedd8f1a 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -400,7 +400,7 @@ class DataLoaderDispatcher(DataLoader):\n - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n \n- def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ def __init__(self, dataset, split_batches: bool = False, _drop_last: bool = False, **kwargs):\n shuffle = False\n if is_torch_version(\">=\", \"1.11.0\"):\n from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe\n@@ -419,6 +419,13 @@ def __init__(self, dataset, split_batches: bool = False, **kwargs):\n \n self.gradient_state = GradientState()\n self.state = AcceleratorState()\n+ self._drop_last = _drop_last\n+ try:\n+ length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n+ self.gradient_state._set_remainder(length % self.total_batch_size)\n+ except Exception:\n+ # We can safely pass because the default is -1\n+ pass\n \n def _fetch_batches(self, iterator):\n batches, batch = None, None\n@@ -448,7 +455,7 @@ def _fetch_batches(self, iterator):\n self._stop_iteration = batch_info[1]\n if self._stop_iteration:\n # If drop_last is False and split_batches is False, we may have a remainder to take care of.\n- if not self.split_batches and not self.drop_last:\n+ if not self.split_batches and not self._drop_last:\n if self.state.process_index == 0 and len(batches) > 0:\n batch = concatenate(batches, dim=0)\n batch_info = [get_data_structure(batch), False]\n@@ -472,10 +479,12 @@ def __iter__(self):\n if self.state.process_index == 0:\n # We only iterate through the DataLoader on process 0.\n main_iterator = super().__iter__()\n+ stop_iteration = False\n self._stop_iteration = False\n first_batch = None\n- batch, batch_info, skip = self._fetch_batches(main_iterator)\n- while True:\n+ next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n+ while not stop_iteration:\n+ batch, batch_info, skip = next_batch, next_batch_info, next_skip\n if skip:\n continue\n if self.state.process_index != 0:\n@@ -485,34 +494,40 @@ def __iter__(self):\n # Broadcast the batch before splitting it.\n batch = broadcast(batch, from_process=0)\n \n- if not self.drop_last and first_batch is None:\n+ if not self._drop_last and first_batch is None:\n # We keep at least num processes elements of the first batch to be able to complete the last batch\n first_batch = slice_tensors(batch, slice(0, self.state.num_processes))\n \n observed_batch_size = find_batch_size(batch)\n batch_size = observed_batch_size // self.state.num_processes\n \n- if not self.drop_last and self._stop_iteration and observed_batch_size % self.state.num_processes != 0:\n+ if not self._drop_last and self._stop_iteration and observed_batch_size 
% self.state.num_processes != 0:\n # If the last batch is not complete, let's add the first batch to it.\n batch = concatenate([batch, first_batch], dim=0)\n+ # Batch size computation above is wrong, it's off by 1 so we fix it.\n batch_size += 1\n \n data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)\n- next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n batch = slice_tensors(batch, data_slice)\n- if not self._stop_iteration:\n- yield batch\n- batch, batch_info, skip = next_batch, next_batch_info, next_skip\n- else:\n+\n+ stop_iteration = self._stop_iteration\n+ if not stop_iteration:\n+ # We may still be at the end of the dataloader without knowing it yet: if there is nothing left\n+ # because by change the dataset had a round multiple of samples.\n+ next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n+ # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.\n+ if self._stop_iteration and next_batch_info[0] is None:\n+ stop_iteration = True\n+\n+ if stop_iteration:\n self.gradient_state._set_end_of_dataloader(True)\n- yield batch\n- break\n+ yield batch\n \n def __len__(self):\n whole_length = super().__len__()\n if self.split_batches:\n return whole_length\n- elif self.drop_last:\n+ elif self._drop_last:\n return whole_length // self.state.num_processes\n else:\n return math.ceil(whole_length / self.state.num_processes)\n@@ -675,6 +690,7 @@ def prepare_data_loader(\n new_dataset,\n split_batches=split_batches,\n batch_sampler=new_batch_sampler,\n+ _drop_last=dataloader.drop_last,\n **kwargs,\n )\n else:\ndiff --git a/src/accelerate/test_utils/scripts/test_sync.py b/src/accelerate/test_utils/scripts/test_sync.py\nindex ae5a2c65b..8cee87d3d 100644\n--- a/src/accelerate/test_utils/scripts/test_sync.py\n+++ b/src/accelerate/test_utils/scripts/test_sync.py\n@@ -245,7 +245,7 @@ def main():\n \"**Test `accumulate` gradient accumulation, \",\n f\"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**\",\n )\n- test_gradient_accumulation(split_batch)\n+ test_gradient_accumulation(split_batch, dispatch_batches)\n if state.local_process_index == 0:\n print(\n \"**Test `accumulate` gradient accumulation with optimizer and scheduler, \",\n", "code_comments": [ { "body": "PyTorch does not let us pass `drop_last`.", "diff_hunk": "@@ -400,7 +400,7 @@ class DataLoaderDispatcher(DataLoader):\n - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n \n- def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ def __init__(self, dataset, split_batches: bool = False, _drop_last: bool = False, **kwargs):", "from_author": true }, { "body": "Removed this logic as we don't need to take the next batch to have an informed decision on whether we are the last batch or not. 
This was actually causing the dataloader to stop one batch too early.", "diff_hunk": "@@ -485,34 +492,31 @@ def __iter__(self):\n # Broadcast the batch before splitting it.\n batch = broadcast(batch, from_process=0)\n \n- if not self.drop_last and first_batch is None:\n+ if not self._drop_last and first_batch is None:\n # We keep at least num processes elements of the first batch to be able to complete the last batch\n first_batch = slice_tensors(batch, slice(0, self.state.num_processes))\n \n observed_batch_size = find_batch_size(batch)\n batch_size = observed_batch_size // self.state.num_processes\n \n- if not self.drop_last and self._stop_iteration and observed_batch_size % self.state.num_processes != 0:\n+ if not self._drop_last and self._stop_iteration and observed_batch_size % self.state.num_processes != 0:\n # If the last batch is not complete, let's add the first batch to it.\n batch = concatenate([batch, first_batch], dim=0)\n+ # Batch size computation above is wrong, it's off by 1 so we fix it.\n batch_size += 1\n \n data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)\n- next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n batch = slice_tensors(batch, data_slice)\n- if not self._stop_iteration:\n- yield batch\n- batch, batch_info, skip = next_batch, next_batch_info, next_skip\n- else:", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/588", "pr_id": 1014194919 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 7100d2c88..188800527 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -405,17 +405,12 @@ def get_balanced_memory(\n leaves = [n for n in module_sizes if len([p for p in module_sizes if p.startswith(n) and len(p) > len(n)]) == 0]\n mean_leaves = int(sum([module_sizes[n] for n in leaves]) / len(leaves))\n buffer = int(1.25 * max(buffer, mean_leaves))\n- if low_zero:\n- per_gpu += buffer\n- gpu_zero = 0\n- else:\n- gpu_zero = per_gpu\n- per_gpu += buffer\n+ per_gpu += buffer\n \n max_memory = get_max_memory(max_memory)\n- for i in range(num_devices):\n- # We still leave slightly more space on GPU 0 and only apply the buffer on the other devices.\n- max_memory[i] = min(gpu_zero if i == 0 else per_gpu, max_memory[i])\n+ # The last device is left with max_memory just in case the buffer is not enough.\n+ for i in range(num_devices - 1):\n+ max_memory[i] = min(0 if low_zero and i == 0 else per_gpu, max_memory[i])\n \n if low_zero:\n min_zero = max(0, module_sizes[\"\"] - sum([max_memory[i] for i in range(1, num_devices)]))\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\nindex 799170257..f5c36ee3d 100644\n--- a/tests/test_modeling_utils.py\n+++ b/tests/test_modeling_utils.py\n@@ -365,7 +365,11 @@ def test_get_balanced_memory(self):\n model = ModelForTest()\n # model has size 236: linear1 64, batchnorm 72, linear2 100\n max_memory = get_balanced_memory(model, max_memory={0: 200, 1: 200})\n- self.assertDictEqual({0: 118, 1: 200}, max_memory)\n+ self.assertDictEqual({0: 200, 1: 200}, max_memory)\n \n max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 300})\n- self.assertDictEqual({0: 118, 1: 215}, max_memory)\n+ self.assertDictEqual({0: 215, 1: 300}, max_memory)\n+\n+ # Last device always get max memory to give 
more buffer and avoid accidental CPU offload\n+ max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500})\n+ self.assertDictEqual({0: 215, 1: 500}, max_memory)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/583", "pr_id": 1011203365 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 5d31dd8f4..1b462a1f4 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -72,11 +72,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex dc41b91e5..6dd61bbf8 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -92,11 +92,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex b95fb9908..a279e64eb 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -127,11 +127,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex 580d07a57..69856c97b 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -67,11 +67,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- 
tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex d85319552..91f3e41d1 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -72,11 +72,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex cf581c73d..522cc571b 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -74,11 +74,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 0da8c437a..d7248682d 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -72,11 +72,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex fc0fae90b..000da6038 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -91,11 +91,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- 
batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 33c0ed7aa..b1e7cba27 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -65,11 +65,13 @@ def tokenize_function(examples):\n return outputs\n \n # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n- )\n+ # starting with the main process first:\n+ with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/581", "pr_id": 1011025855 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 3860875e9..5d31dd8f4 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -234,6 +234,7 @@ def training_function(config, args):\n accelerator.save_state(output_dir)\n \n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True` (the default).\n batch.to(accelerator.device)\n@@ -241,7 +242,15 @@ def training_function(config, args):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex cd5b2fd02..dc41b91e5 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -201,13 +201,23 @@ def training_function(config, args):\n optimizer.zero_grad()\n \n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, 
references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 1426dd0cd..b95fb9908 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -272,6 +272,7 @@ def collate_fn(examples):\n # context manager to track the peak memory usage during the evaluation\n with TorchTracemalloc() as tracemalloc:\n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n@@ -279,7 +280,15 @@ def collate_fn(examples):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex 2e6de6a32..580d07a57 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -168,13 +168,23 @@ def training_function(config, args):\n optimizer.zero_grad()\n \n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n 
references=references,\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 6f7cf00fd..d85319552 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -180,13 +180,23 @@ def inner_training_loop(batch_size):\n optimizer.zero_grad()\n \n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex f39b064d6..cf581c73d 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -190,8 +190,6 @@ def training_function(config, args):\n else:\n # Otherwise we add the number of samples seen\n samples_seen += references.shape[0]\n- # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:\n- # accelerator.gather_for_metrics((predictions, references))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex d8c985e88..0da8c437a 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -192,6 +192,7 @@ def training_function(config, args):\n optimizer.zero_grad()\n \n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True` (the default).\n batch.to(accelerator.device)\n@@ -199,7 +200,15 @@ def training_function(config, args):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex ee051a0c5..f984406bf 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -232,6 +232,7 @@ def 
training_function(config, args):\n accelerator.save_state(output_dir)\n model.eval()\n accurate = 0\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n@@ -239,8 +240,17 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- predictions, labels = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\n- accurate_preds = predictions == labels\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"label\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n+ accurate_preds = predictions == references\n accurate += accurate_preds.long().sum()\n \n eval_metric = accurate.item() / accelerator.gradient_state.samples_seen\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 88d379c8e..fc0fae90b 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -210,6 +210,7 @@ def collate_fn(examples):\n accelerator.save_state(output_dir)\n \n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n@@ -217,7 +218,15 @@ def collate_fn(examples):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex 6663b2fe9..875b68398 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -166,6 +166,7 @@ def training_function(config, args):\n model.eval()\n accurate = 0\n num_elems = 0\n+ samples_seen = 0\n for _, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n@@ -173,8 +174,17 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- predictions, labels = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\n- accurate_preds = predictions == labels\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = 
accelerator.gather((predictions, batch[\"label\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n+ accurate_preds = predictions == references\n num_elems += accurate_preds.shape[0]\n accurate += accurate_preds.long().sum()\n \ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 2ef18598b..33c0ed7aa 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -152,13 +152,23 @@ def training_function(config, args):\n optimizer.zero_grad()\n \n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 977cd5d24..5a9a55c68 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -943,6 +943,9 @@ def gather_for_metrics(self, tensor):\n tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):\n The tensors for calculating metrics across all processes.\n \"\"\"\n+ raise NotImplementedError(\n+ \"Currently there are a number of bugs with this method. You should use `Accelerator.gather()` and drop the samples yourself for the time being.\"\n+ )\n tensor = self.gather(tensor)\n if self.use_distributed:\n if self.gradient_state.remainder == -1:\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex d65099602..382bf81bf 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -10,6 +10,7 @@\n require_multi_gpu,\n require_single_gpu,\n require_tpu,\n+ skip,\n slow,\n )\n from .training import RegressionDataset, RegressionModel\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex b5cccc1ff..145cc80b2 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -57,6 +57,11 @@ def parse_flag_from_env(key, default=False):\n _run_slow_tests = parse_flag_from_env(\"RUN_SLOW\", default=False)\n \n \n+def skip(test_case):\n+ \"Decorator that skips a test unconditionally\"\n+ return unittest.skip(\"Test was skipped\")(test_case)\n+\n+\n def slow(test_case):\n \"\"\"\n Decorator marking a test as slow. Slow tests are skipped by default. 
Set the RUN_SLOW environment variable to a\ndiff --git a/tests/test_metrics.py b/tests/test_metrics.py\nindex c744a2fdb..7e42c793e 100644\n--- a/tests/test_metrics.py\n+++ b/tests/test_metrics.py\n@@ -25,11 +25,13 @@\n require_cpu,\n require_multi_gpu,\n require_single_gpu,\n+ skip,\n test_metrics,\n )\n from accelerate.utils import get_launch_prefix, patch_environment\n \n \n+@skip\n class MetricTester(unittest.TestCase):\n def setUp(self):\n mod_file = inspect.getfile(accelerate.test_utils)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/580", "pr_id": 1010973479 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 9ee840440..599e710f0 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -752,7 +752,7 @@ def launch_command(args):\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n- if not args.multi_gpu and not args.tpu and not args.use_deepspeed:\n+ if not args.multi_gpu and not args.tpu and not args.use_deepspeed and not args.use_fsdp:\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/579", "pr_id": 1010899612 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 5e364491e..3860875e9 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -241,7 +241,7 @@ def training_function(config, args):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex 79a207fa8..cd5b2fd02 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -207,9 +207,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather_for_metrics(\n- (predictions, batch[\"labels\"]), eval_dataloader\n- )\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\n@@ -228,7 +226,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), test_dataloader)\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n 
fold_predictions.append(predictions.cpu())\n if i == 0:\n # We need all of the test predictions\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 8046b4557..1426dd0cd 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -279,9 +279,7 @@ def collate_fn(examples):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather_for_metrics(\n- (predictions, batch[\"labels\"]), eval_dataloader\n- )\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex fe3d948f5..2e6de6a32 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -174,7 +174,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex f71283561..6f7cf00fd 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -186,9 +186,7 @@ def inner_training_loop(batch_size):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather_for_metrics(\n- (predictions, batch[\"labels\"]), eval_dataloader\n- )\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex d69c0bc90..f39b064d6 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -191,7 +191,7 @@ def training_function(config, args):\n # Otherwise we add the number of samples seen\n samples_seen += references.shape[0]\n # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:\n- # accelerator.gather_for_metrics((predictions, references), eval_dataloader)\n+ # accelerator.gather_for_metrics((predictions, references))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex d453b770c..d8c985e88 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -199,7 +199,7 @@ def training_function(config, args):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git 
a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 17cc83170..ee051a0c5 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -239,7 +239,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- predictions, labels = accelerator.gather_for_metrics((predictions, batch[\"label\"]), eval_dataloader)\n+ predictions, labels = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\n accurate_preds = predictions == labels\n accurate += accurate_preds.long().sum()\n \ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex dc0cf43ed..88d379c8e 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -217,7 +217,7 @@ def collate_fn(examples):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex aa97ce2d7..6663b2fe9 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -173,7 +173,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- predictions, labels = accelerator.gather_for_metrics((predictions, batch[\"label\"]), eval_dataloader)\n+ predictions, labels = accelerator.gather_for_metrics((predictions, batch[\"label\"]))\n accurate_preds = predictions == labels\n num_elems += accurate_preds.shape[0]\n accurate += accurate_preds.long().sum()\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex d64ab3bb7..2ef18598b 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -158,7 +158,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]))\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex acf960026..977cd5d24 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -934,7 +934,7 @@ def gather(self, tensor):\n \"\"\"\n return gather(tensor)\n \n- def gather_for_metrics(self, tensor, dataloader):\n+ def gather_for_metrics(self, tensor):\n \"\"\"\n Gathers `tensor` and potentially drops duplicates in the last batch if on a distributed system. Should be used\n for gathering the inputs and targets for metric calculation.\n@@ -942,17 +942,20 @@ def gather_for_metrics(self, tensor, dataloader):\n Args:\n tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):\n The tensors for calculating metrics across all processes.\n- dataloader (`torch.utils.data.DataLoader`):\n- A dataloader prepared with `Accelerator.prepare`\n \"\"\"\n tensor = self.gather(tensor)\n if self.use_distributed:\n+ if self.gradient_state.remainder == -1:\n+ logger.info(\n+ \"The used dataset had no length, returning gathered tensors. 
You should drop the remainder yourself.\"\n+ )\n+ return tensor\n try:\n # Then see if we're on the last batch of our eval dataloader\n if self.gradient_state.end_of_dataloader:\n # Last batch needs to be truncated on distributed systems as it contains additional samples\n def _adjust_samples(tensor):\n- return tensor[: dataloader.total_dataset_length - self.gradient_state.samples_seen]\n+ return tensor[: self.gradient_state.remainder]\n \n return recursively_apply(_adjust_samples, tensor)\n else:\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex ba3338546..531f8eff9 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -138,6 +138,10 @@ def __init__(\n self.batch_size = batch_sampler.batch_size\n self.drop_last = batch_sampler.drop_last\n \n+ @property\n+ def total_length(self):\n+ return len(self.batch_sampler)\n+\n def __len__(self):\n if self.split_batches:\n return len(self.batch_sampler)\n@@ -332,7 +336,12 @@ def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n self.gradient_state._set_end_of_dataloader(False)\n- self.gradient_state._set_samples_seen(0)\n+ try:\n+ length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n+ self.gradient_state._set_remainder(length % self.total_batch_size)\n+ except Exception:\n+ # We can safely pass because the default is -1\n+ pass\n dataloader_iter = super().__iter__()\n # We iterate one batch ahead to check when we are at the end\n try:\n@@ -346,12 +355,10 @@ def __iter__(self):\n if self.device is not None:\n current_batch = send_to_device(current_batch, self.device)\n next_batch = next(dataloader_iter)\n- self.gradient_state._iterate_samples_seen(find_batch_size(current_batch))\n yield current_batch\n current_batch = next_batch\n except StopIteration:\n self.gradient_state._set_end_of_dataloader(True)\n- self.gradient_state._iterate_samples_seen(find_batch_size(current_batch))\n yield current_batch\n break\n \n@@ -365,7 +372,10 @@ def total_batch_size(self):\n \n @property\n def total_dataset_length(self):\n- return len(self.dataset)\n+ if hasattr(\"total_length\", self.dataset):\n+ return self.dataset.total_length\n+ else:\n+ return len(self.dataset)\n \n \n class DataLoaderDispatcher(DataLoader):\n@@ -453,7 +463,11 @@ def _fetch_batches(self, iterator):\n \n def __iter__(self):\n self.gradient_state._set_end_of_dataloader(False)\n- self.gradient_state._set_samples_seen(0)\n+ try:\n+ length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n+ self.gradient_state._set_remainder(length % self.batch_size)\n+ except:\n+ self.gradient_state._set_remainder(-1)\n main_iterator = None\n if self.state.process_index == 0:\n # We only iterate through the DataLoader on process 0.\n@@ -487,11 +501,9 @@ def __iter__(self):\n next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n batch = slice_tensors(batch, data_slice)\n if not self._stop_iteration:\n- self.gradient_state._iterate_samples_seen(batch_size)\n yield batch\n batch, batch_info, skip = next_batch, next_batch_info, next_skip\n else:\n- self.gradient_state._iterate_samples_seen(batch_size)\n self.gradient_state._set_end_of_dataloader(True)\n yield batch\n break\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 06a25bc4e..27a627ae9 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -258,6 +258,7 @@ class GradientState:\n \n - **sync_gradients** (`bool`) -- Whether the gradients 
should be synced\n - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader\n+ - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader\n \"\"\"\n \n _shared_state = {}\n@@ -267,14 +268,14 @@ def __init__(self):\n if not getattr(self, \"initialized\", False):\n self.sync_gradients = True\n self.end_of_dataloader = False\n- self.samples_seen = 0\n+ self.remainder = -1\n self.initialized = True\n \n def __repr__(self):\n return (\n f\"Sync Gradients: {self.sync_gradients}\\n\"\n f\"At end of current dataloader: {self.end_of_dataloader}\\n\"\n- f\"Samples seen: {self.samples_seen}\"\n+ f\"Extra samples added: {self.remainder}\"\n )\n \n def _set_sync_gradients(self, sync_gradients):\n@@ -285,10 +286,6 @@ def _set_end_of_dataloader(self, end_of_dataloader):\n \"Private function that sets whether the end of the current dataloader has been reached. Users should not have to call this.\"\n self.end_of_dataloader = end_of_dataloader\n \n- def _set_samples_seen(self, samples_seen):\n- \"Private function that sets the number of samples iterated over. Users should not have to call this.\"\n- self.samples_seen = samples_seen\n-\n- def _iterate_samples_seen(self, iteration: int = 1):\n- \"Private function that iterates the number of samples seen by an iteration. Users should not have to call this.\"\n- self._set_samples_seen(self.samples_seen + iteration)\n+ def _set_remainder(self, remainder):\n+ \"Private function that sets the number of remaining samples at the end of the dataloader\"\n+ self.remainder = remainder\ndiff --git a/src/accelerate/test_utils/scripts/test_metrics.py b/src/accelerate/test_utils/scripts/test_metrics.py\nindex c426d1fc5..8f057ccc5 100644\n--- a/src/accelerate/test_utils/scripts/test_metrics.py\n+++ b/src/accelerate/test_utils/scripts/test_metrics.py\n@@ -22,50 +22,63 @@\n from accelerate.utils import set_seed\n \n \n-def get_setup(accelerator):\n+def get_setup(accelerator, num_samples=82):\n \"Returns everything needed to perform basic training\"\n set_seed(42)\n model = RegressionModel()\n ddp_model = deepcopy(model)\n- dset = RegressionDataset(length=80)\n+ dset = RegressionDataset(length=num_samples)\n dataloader = DataLoader(dset, batch_size=16)\n model.to(accelerator.device)\n ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)\n return model, ddp_model, dataloader\n \n \n-def accuracy(predictions, labels) -> float:\n- \"\"\"\n- Get the accuracy with respect to the most likely label\n- \"\"\"\n- return (predictions == labels).float().mean()\n-\n-\n-def test_torch_metrics():\n- accelerator = Accelerator()\n- model, ddp_model, dataloader = get_setup(accelerator)\n+def generate_predictions(model, dataloader, accelerator):\n+ logits_and_targets = []\n for batch in dataloader:\n- ddp_input, ddp_target = batch.values()\n- # First do single process\n- input, target = accelerator.gather((ddp_input, ddp_target))\n- input, target = input.to(accelerator.device), target.to(accelerator.device)\n+ input, target = batch.values()\n with torch.no_grad():\n logits = model(input)\n- accuracy_single = accuracy(logits.argmax(dim=-1), target)\n- # Then do multiprocess\n- with torch.no_grad():\n- logits = ddp_model(ddp_input)\n- logits, target = accelerator.gather_for_metrics((logits, ddp_target), dataloader)\n- accuracy_multi = accuracy(logits.argmax(dim=-1), target)\n- assert torch.allclose(accuracy_single, accuracy_multi), \"The two accuracies were not the same!\"\n+ logits, target = 
accelerator.gather_for_metrics((logits, target))\n+ logits_and_targets.append((logits, target))\n+ inps, targs = [], []\n+ for (inp, targ) in logits_and_targets:\n+ inps.append(inp)\n+ targs.append(targ)\n+ inps, targs = torch.cat(inps), torch.cat(targs)\n+ return inps, targs\n+\n+\n+def test_torch_metrics(accelerator: Accelerator, num_samples=82):\n+ model, ddp_model, dataloader = get_setup(accelerator, num_samples)\n+ inps, targs = generate_predictions(ddp_model, dataloader, accelerator)\n+ assert (\n+ len(inps) == num_samples\n+ ), f\"Unexpected number of inputs:\\n Expected: {num_samples}\\n Actual: {len(inps)}\"\n \n \n def main():\n- accelerator = Accelerator()\n- state = accelerator.state\n- if state.local_process_index == 0:\n+ accelerator = Accelerator(split_batches=False, dispatch_batches=False)\n+ if accelerator.is_local_main_process:\n print(\"**Test torch metrics**\")\n- test_torch_metrics()\n+ print(\"With: `split_batches=False`, `dispatch_batches=False`\")\n+ test_torch_metrics(accelerator)\n+ accelerator.state._reset_state()\n+ accelerator = Accelerator(split_batches=True, dispatch_batches=False)\n+ if accelerator.is_local_main_process:\n+ print(\"With: `split_batches=True`, `dispatch_batches=False`\")\n+ test_torch_metrics(accelerator)\n+ accelerator.state._reset_state()\n+ accelerator = Accelerator(split_batches=False, dispatch_batches=True)\n+ if accelerator.is_local_main_process:\n+ print(\"With: `split_batches=False`, `dispatch_batches=True`\")\n+ test_torch_metrics(accelerator)\n+ accelerator.state._reset_state()\n+ accelerator = Accelerator(split_batches=True, dispatch_batches=True)\n+ if accelerator.is_local_main_process:\n+ print(\"With: `split_batches=True`, `dispatch_batches=True`\")\n+ test_torch_metrics(accelerator)\n \n \n def _mp_fn(index):\n", "code_comments": [ { "body": "I think this can be an info, no need for a warning here.", "diff_hunk": "@@ -934,25 +934,28 @@ def gather(self, tensor):\n \"\"\"\n return gather(tensor)\n \n- def gather_for_metrics(self, tensor, dataloader):\n+ def gather_for_metrics(self, tensor):\n \"\"\"\n Gathers `tensor` and potentially drops duplicates in the last batch if on a distributed system. Should be used\n for gathering the inputs and targets for metric calculation.\n \n Args:\n tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):\n The tensors for calculating metrics across all processes.\n- dataloader (`torch.utils.data.DataLoader`):\n- A dataloader prepared with `Accelerator.prepare`\n \"\"\"\n tensor = self.gather(tensor)\n if self.use_distributed:\n+ if self.gradient_state.remainder == -1:\n+ logger.warn(\n+ \"The used dataset had no length, returning gathered tensors. 
You should drop the remainder yourself.\"\n+ )", "from_author": false }, { "body": "There should be an exception ehre :-)", "diff_hunk": "@@ -332,7 +336,11 @@ def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n self.gradient_state._set_end_of_dataloader(False)\n- self.gradient_state._set_samples_seen(0)\n+ try:\n+ length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n+ self.gradient_state._set_remainder(length % self.batch_size)\n+ except:", "from_author": false }, { "body": "Nice new test!", "diff_hunk": "@@ -22,50 +22,63 @@\n from accelerate.utils import set_seed\n \n \n-def get_setup(accelerator):\n+def get_setup(accelerator, num_samples=82):\n \"Returns everything needed to perform basic training\"\n set_seed(42)\n model = RegressionModel()\n ddp_model = deepcopy(model)\n- dset = RegressionDataset(length=80)\n+ dset = RegressionDataset(length=num_samples)\n dataloader = DataLoader(dset, batch_size=16)\n model.to(accelerator.device)\n ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)\n return model, ddp_model, dataloader\n \n \n-def accuracy(predictions, labels) -> float:\n- \"\"\"\n- Get the accuracy with respect to the most likely label\n- \"\"\"\n- return (predictions == labels).float().mean()\n-\n-\n-def test_torch_metrics():\n- accelerator = Accelerator()\n- model, ddp_model, dataloader = get_setup(accelerator)\n+def generate_predictions(model, dataloader, accelerator):\n+ logits_and_targets = []\n for batch in dataloader:\n- ddp_input, ddp_target = batch.values()\n- # First do single process\n- input, target = accelerator.gather((ddp_input, ddp_target))\n- input, target = input.to(accelerator.device), target.to(accelerator.device)\n+ input, target = batch.values()\n with torch.no_grad():\n logits = model(input)\n- accuracy_single = accuracy(logits.argmax(dim=-1), target)\n- # Then do multiprocess\n- with torch.no_grad():\n- logits = ddp_model(ddp_input)\n- logits, target = accelerator.gather_for_metrics((logits, ddp_target), dataloader)\n- accuracy_multi = accuracy(logits.argmax(dim=-1), target)\n- assert torch.allclose(accuracy_single, accuracy_multi), \"The two accuracies were not the same!\"\n+ logits, target = accelerator.gather_for_metrics((logits, target))\n+ logits_and_targets.append((logits, target))\n+ inps, targs = [], []\n+ for (inp, targ) in logits_and_targets:\n+ inps.append(inp)\n+ targs.append(targ)\n+ inps, targs = torch.cat(inps), torch.cat(targs)\n+ return inps, targs\n+\n+\n+def test_torch_metrics(accelerator: Accelerator, num_samples=82):\n+ model, ddp_model, dataloader = get_setup(accelerator, num_samples)\n+ inps, targs = generate_predictions(ddp_model, dataloader, accelerator)\n+ assert (\n+ len(inps) == num_samples\n+ ), f\"Unexpected number of inputs:\\n Expected: {num_samples}\\n Actual: {len(inps)}\"", "from_author": false }, { "body": "```suggestion\r\nself.gradient_state._set_remainder(length % self.total_batch_size)\r\n```", "diff_hunk": "@@ -453,7 +463,11 @@ def _fetch_batches(self, iterator):\n \n def __iter__(self):\n self.gradient_state._set_end_of_dataloader(False)\n- self.gradient_state._set_samples_seen(0)\n+ try:\n+ length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n+ self.gradient_state._set_remainder(length % self.batch_size)", "from_author": false }, { "body": "Irrespective of the above change, `test_metrics.py` fails when `dispatch_batches=True` when the `num_samples % num_processes != 0` , e.g., num_samples=97 and 
num_processes=2. Below is the modified test with print statements. I think, `IterableDatasetShard` should be used in conjunction with `dispatch_batches=True` for expected behaviour or `DataLoaderDispatcher` needs to be fixed to handle normal dataloaders. \r\n\r\n![Screenshot 2022-07-28 at 10 54 03 AM](https://user-images.githubusercontent.com/13534540/181427059-38d54e57-894e-4ae9-bf6c-9add9549c1a5.png)\r\n", "diff_hunk": "@@ -453,7 +463,11 @@ def _fetch_batches(self, iterator):\n \n def __iter__(self):\n self.gradient_state._set_end_of_dataloader(False)\n- self.gradient_state._set_samples_seen(0)\n+ try:\n+ length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n+ self.gradient_state._set_remainder(length % self.batch_size)", "from_author": false }, { "body": "Thanks, will change the test case slightly for this and resolve today, though a bit weird that 82 didn't catch this", "diff_hunk": "@@ -453,7 +463,11 @@ def _fetch_batches(self, iterator):\n \n def __iter__(self):\n self.gradient_state._set_end_of_dataloader(False)\n- self.gradient_state._set_samples_seen(0)\n+ try:\n+ length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n+ self.gradient_state._set_remainder(length % self.batch_size)", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/578", "pr_id": 1009943936 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex bd4b72682..7100d2c88 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -127,7 +127,7 @@ def set_module_tensor_to_device(\n \n if is_buffer:\n module._buffers[tensor_name] = new_value\n- elif torch.device(device) != module._parameters[tensor_name].device:\n+ elif value is not None or torch.device(device) != module._parameters[tensor_name].device:\n param_cls = type(module._parameters[tensor_name])\n kwargs = module._parameters[tensor_name].__dict__\n new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/577", "pr_id": 1009796763 }, { "diff": "diff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 244bf89b3..bd4b72682 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -127,13 +127,11 @@ def set_module_tensor_to_device(\n \n if is_buffer:\n module._buffers[tensor_name] = new_value\n- elif module._parameters[tensor_name].device.type != \"cuda\":\n+ elif torch.device(device) != module._parameters[tensor_name].device:\n param_cls = type(module._parameters[tensor_name])\n kwargs = module._parameters[tensor_name].__dict__\n new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)\n module._parameters[tensor_name] = new_value\n- else:\n- module._parameters[tensor_name] = module._parameters[tensor_name].to(device)\n \n \n def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n", "code_comments": [ { "body": "```suggestion\r\n elif device != module._parameters[tensor_name].device.index:\r\n```\r\nThe `device` param is an integer so to properly compare the devices we need to access the 
index of the `device` attribute of the parameter ;) ", "diff_hunk": "@@ -127,13 +127,11 @@ def set_module_tensor_to_device(\n \n if is_buffer:\n module._buffers[tensor_name] = new_value\n- elif module._parameters[tensor_name].device.type != \"cuda\":\n+ elif device != module._parameters[tensor_name].device:", "from_author": false }, { "body": "Ah should be `torch.device(device)` on the left side to cover all cases!", "diff_hunk": "@@ -127,13 +127,11 @@ def set_module_tensor_to_device(\n \n if is_buffer:\n module._buffers[tensor_name] = new_value\n- elif module._parameters[tensor_name].device.type != \"cuda\":\n+ elif device != module._parameters[tensor_name].device:", "from_author": true }, { "body": "Fixed [here](https://github.com/huggingface/accelerate/pull/576/commits/10334600357bdd014acd67bb255884ee10b54fe6) normally.", "diff_hunk": "@@ -127,13 +127,11 @@ def set_module_tensor_to_device(\n \n if is_buffer:\n module._buffers[tensor_name] = new_value\n- elif module._parameters[tensor_name].device.type != \"cuda\":\n+ elif device != module._parameters[tensor_name].device:", "from_author": true }, { "body": "Can confirm this fixes the issue too !", "diff_hunk": "@@ -127,13 +127,11 @@ def set_module_tensor_to_device(\n \n if is_buffer:\n module._buffers[tensor_name] = new_value\n- elif module._parameters[tensor_name].device.type != \"cuda\":\n+ elif device != module._parameters[tensor_name].device:", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/576", "pr_id": 1009649838 }, { "diff": "diff --git a/.gitignore b/.gitignore\nindex 7f145cd5c..da99824aa 100644\n--- a/.gitignore\n+++ b/.gitignore\n@@ -135,4 +135,7 @@ dmypy.json\n .idea\n \n # Mac .DS_Store\n-.DS_Store\n\\ No newline at end of file\n+.DS_Store\n+\n+# More test things\n+wandb\n\\ No newline at end of file\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 8983468e1..ac8c4de52 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -20,6 +20,8 @@\n import unittest\n from unittest import mock\n \n+import torch\n+\n from accelerate.test_utils.examples import compare_against_test\n from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow\n from accelerate.utils import write_basic_config\n@@ -169,8 +171,16 @@ def test_load_states_by_steps(self):\n --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_2\")}\n \"\"\".split()\n output = run_command(self._launch_args + testargs, return_stdout=True)\n- self.assertNotIn(\"epoch 0:\", output)\n- self.assertIn(\"epoch 1:\", output)\n+ if torch.cuda.is_available():\n+ num_processes = torch.cuda.device_count()\n+ else:\n+ num_processes = 1\n+ if num_processes > 1:\n+ self.assertNotIn(\"epoch 0:\", output)\n+ self.assertIn(\"epoch 1:\", output)\n+ else:\n+ self.assertIn(\"epoch 0:\", output)\n+ self.assertIn(\"epoch 1:\", output)\n \n @slow\n def test_cross_validation(self):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/573", "pr_id": 1008913052 }, { "diff": "diff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 361e19642..8983468e1 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -166,7 +166,7 @@ def test_load_states_by_epoch(self):\n def 
test_load_states_by_steps(self):\n testargs = f\"\"\"\n examples/by_feature/checkpointing.py\n- --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_5\")}\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_2\")}\n \"\"\".split()\n output = run_command(self._launch_args + testargs, return_stdout=True)\n self.assertNotIn(\"epoch 0:\", output)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/572", "pr_id": 1008735074 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 1b21ebed7..5e364491e 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -107,6 +107,9 @@ def collate_fn(examples):\n \n \n def training_function(config, args):\n+ # For testing only\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ config[\"num_epochs\"] = 2\n # Initialize accelerator\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 09199154a..8046b4557 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -83,6 +83,9 @@ def __exit__(self, *exc):\n \n \n def training_function(config, args):\n+ # For testing only\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ config[\"num_epochs\"] = 2\n # Initialize accelerator\n if args.with_tracking:\n accelerator = Accelerator(\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex d95f9bef3..fe3d948f5 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -102,6 +102,9 @@ def collate_fn(examples):\n \n \n def training_function(config, args):\n+ # For testing only\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ config[\"num_epochs\"] = 2\n # New Code #\n gradient_accumulation_steps = int(args.gradient_accumulation_steps)\n # Initialize accelerator\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex b11c80fd0..f71283561 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -107,6 +107,9 @@ def collate_fn(examples):\n \n \n def training_function(config, args):\n+ # For testing only\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ config[\"num_epochs\"] = 2\n # Initialize accelerator\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex dc47f3db6..d69c0bc90 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -109,6 +109,9 @@ def collate_fn(examples):\n \n \n def training_function(config, args):\n+ # For testing only\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ config[\"num_epochs\"] = 2\n # Initialize accelerator\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n # Sample hyper-parameters for 
learning rate, batch size, seed and a few other HPs\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 00c8665dd..d453b770c 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -107,6 +107,9 @@ def collate_fn(examples):\n \n \n def training_function(config, args):\n+ # For testing only\n+ if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ config[\"num_epochs\"] = 2\n # Initialize Accelerator\n \n # New Code #\ndiff --git a/src/accelerate/test_utils/examples.py b/src/accelerate/test_utils/examples.py\nindex 4e4092c0e..f459e03c4 100644\n--- a/src/accelerate/test_utils/examples.py\n+++ b/src/accelerate/test_utils/examples.py\n@@ -115,11 +115,17 @@ def compare_against_test(base_filename: str, feature_filename: str, parser_only:\n # Specific code in our script that differs from the full version, aka what is new\n new_feature_code = []\n passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement\n- for i, line in enumerate(feature_file_func):\n+ it = iter(feature_file_func)\n+ for i in range(len(feature_file_func) - 1):\n if i not in passed_idxs:\n+ line = next(it)\n if (line not in full_file_func) and (line.lstrip() != _dl_line):\n- new_feature_code.append(line)\n- passed_idxs.append(i)\n+ if \"TESTING_MOCKED_DATALOADERS\" not in line:\n+ new_feature_code.append(line)\n+ passed_idxs.append(i)\n+ else:\n+ # Skip over the `config['num_epochs'] = 2` statement\n+ _ = next(it)\n \n # Extract out just the new parts from the full_file_training_func\n new_full_example_parts = []\n@@ -127,8 +133,9 @@ def compare_against_test(base_filename: str, feature_filename: str, parser_only:\n for i, line in enumerate(base_file_func):\n if i not in passed_idxs:\n if (line not in full_file_func) and (line.lstrip() != _dl_line):\n- new_full_example_parts.append(line)\n- passed_idxs.append(i)\n+ if \"TESTING_MOCKED_DATALOADERS\" not in line:\n+ new_full_example_parts.append(line)\n+ passed_idxs.append(i)\n \n # Finally, get the overall diff\n diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts]\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 9e1854cd8..361e19642 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -20,8 +20,6 @@\n import unittest\n from unittest import mock\n \n-import torch\n-\n from accelerate.test_utils.examples import compare_against_test\n from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow\n from accelerate.utils import write_basic_config\n@@ -145,7 +143,7 @@ def test_checkpointing_by_epoch(self):\n --output_dir {self.tmpdir}\n \"\"\".split()\n run_command(self._launch_args + testargs)\n- self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_1\")))\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_0\")))\n \n def test_checkpointing_by_steps(self):\n testargs = f\"\"\"\n@@ -154,17 +152,16 @@ def test_checkpointing_by_steps(self):\n --output_dir {self.tmpdir}\n \"\"\".split()\n _ = run_command(self._launch_args + testargs)\n- self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_5\")))\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_2\")))\n \n def test_load_states_by_epoch(self):\n testargs = f\"\"\"\n examples/by_feature/checkpointing.py\n- --resume_from_checkpoint {os.path.join(self.tmpdir, \"epoch_1\")}\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"epoch_0\")}\n \"\"\".split()\n output = 
run_command(self._launch_args + testargs, return_stdout=True)\n self.assertNotIn(\"epoch 0:\", output)\n- self.assertNotIn(\"epoch 1:\", output)\n- self.assertIn(\"epoch 2:\", output)\n+ self.assertIn(\"epoch 1:\", output)\n \n def test_load_states_by_steps(self):\n testargs = f\"\"\"\n@@ -172,17 +169,8 @@ def test_load_states_by_steps(self):\n --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_5\")}\n \"\"\".split()\n output = run_command(self._launch_args + testargs, return_stdout=True)\n- if torch.cuda.is_available():\n- num_processes = torch.cuda.device_count()\n- else:\n- num_processes = 1\n- if num_processes > 1:\n- self.assertNotIn(\"epoch 0:\", output)\n- self.assertNotIn(\"epoch 1:\", output)\n- else:\n- self.assertNotIn(\"epoch 0:\", output)\n- self.assertIn(\"epoch 1:\", output)\n- self.assertIn(\"epoch 2:\", output)\n+ self.assertNotIn(\"epoch 0:\", output)\n+ self.assertIn(\"epoch 1:\", output)\n \n @slow\n def test_cross_validation(self):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/571", "pr_id": 1008663257 }, { "diff": "diff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex a0f9fab40..b84a90c6e 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -29,10 +29,12 @@\n \n try:\n import torch_ccl # noqa: F401\n+\n _ccl_available = True\n except ImportError:\n- try: \n- import oneccl_bindings_for_pytorch\n+ try:\n+ import oneccl_bindings_for_pytorch # noqa: F401\n+\n _ccl_available = True\n except ImportError:\n _ccl_available = False\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/569", "pr_id": 1008522599 }, { "diff": "diff --git a/.github/workflows/check_dependencies.yml b/.github/workflows/check_dependencies.yml\nindex 1e8a4e1bb..f8ac1f492 100644\n--- a/.github/workflows/check_dependencies.yml\n+++ b/.github/workflows/check_dependencies.yml\n@@ -1,4 +1,4 @@\n-name: Trigger docker images and run slow tests\n+name: Trigger docker images and run tests\n \n on:\n push:\ndiff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nindex 0e8f31407..c0c9c62f2 100644\n--- a/.github/workflows/nightly.yml\n+++ b/.github/workflows/nightly.yml\n@@ -1,4 +1,4 @@\n-name: Self-hosted runner (scheduled)\n+name: Self-hosted runner with slow tests (scheduled)\n \n on:\n workflow_dispatch:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/568", "pr_id": 1008515899 }, { "diff": "diff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex c846dd65d..06a25bc4e 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -17,7 +17,7 @@\n \n import torch\n \n-from .utils import DistributedType, is_ccl_available, is_deepspeed_available, is_tpu_available\n+from .utils import DistributedType, get_ccl_version, is_ccl_available, is_deepspeed_available, is_tpu_available\n from .utils.dataclasses import SageMakerDistributedType\n \n \n@@ -166,6 +166,10 @@ def __init__(\n elif get_int_from_env([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"], 1) > 1:\n 
self.distributed_type = DistributedType.MULTI_CPU\n if is_ccl_available() and get_int_from_env([\"CCL_WORKER_COUNT\"], 0) > 0:\n+ if get_ccl_version() >= \"1.12\":\n+ import oneccl_bindings_for_pytorch # noqa: F401\n+ else:\n+ import torch_ccl # noqa: F401\n backend = \"ccl\"\n elif torch.distributed.is_mpi_available():\n backend = \"mpi\"\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex f6b83d2f9..0395a271d 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -19,6 +19,7 @@\n TensorInformation,\n )\n from .imports import (\n+ get_ccl_version,\n is_apex_available,\n is_bf16_available,\n is_boto3_available,\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex b84a90c6e..da63d0aa1 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -27,19 +27,6 @@\n import importlib.metadata as importlib_metadata\n \n \n-try:\n- import torch_ccl # noqa: F401\n-\n- _ccl_available = True\n-except ImportError:\n- try:\n- import oneccl_bindings_for_pytorch # noqa: F401\n-\n- _ccl_available = True\n- except ImportError:\n- _ccl_available = False\n-\n-\n try:\n import torch_xla.core.xla_model as xm # noqa: F401\n \n@@ -49,7 +36,14 @@\n \n \n def is_ccl_available():\n- return _ccl_available\n+ return (\n+ importlib.util.find_spec(\"torch_ccl\") is not None\n+ or importlib.util.find_spec(\"oneccl_bindings_for_pytorch\") is not None\n+ )\n+\n+\n+def get_ccl_version():\n+ return importlib_metadata.version(\"oneccl_bind_pt\")\n \n \n def is_apex_available():\n", "code_comments": [], "context": [ { "body": "@liangan1 @yao-matrix @sgugger please have a review", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks for this! Looks great to me πŸ˜„ could you run `make style; make quality` to get past that failure please? 
And then we're good!", "from_author": false }, { "body": "@muellerzr thanks for the suggestion, fix the \"make quality\" issue.", "from_author": true }, { "body": "done for the rebase @sgugger ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/567", "pr_id": 1007931384 }, { "diff": "diff --git a/setup.py b/setup.py\nindex cad067ad5..a59d1ecaf 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\", \"hf-doc-builder >= 0.3.0\"]\n extras[\"docs\"] = []\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\n-extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed\"]\n+extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed\", \"tqdm\"]\n extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\n \n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex f6b83d2f9..af8ddd736 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -100,3 +100,4 @@\n write_basic_config,\n )\n from .random import set_seed, synchronize_rng_state, synchronize_rng_states\n+from .tqdm import tqdm\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex b84a90c6e..4b796e62c 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -113,3 +113,7 @@ def is_boto3_available():\n \n def is_sagemaker_available():\n return importlib.util.find_spec(\"sagemaker\") is not None\n+\n+\n+def is_tqdm_available():\n+ return importlib.util.find_spec(\"tqdm\") is not None\ndiff --git a/src/accelerate/utils/tqdm.py b/src/accelerate/utils/tqdm.py\nnew file mode 100644\nindex 000000000..be489bd90\n--- /dev/null\n+++ b/src/accelerate/utils/tqdm.py\n@@ -0,0 +1,37 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from .imports import is_tqdm_available\n+\n+\n+if is_tqdm_available():\n+ import tqdm.auto as _tqdm\n+\n+from ..state import AcceleratorState\n+\n+\n+def tqdm(main_process_only: bool = True, *args, **kwargs):\n+ \"\"\"\n+ Wrapper around `tqdm.tqdm` that optionally displays only on the main process.\n+\n+ Args:\n+ main_process_only (`bool`, *optional*):\n+ Whether to display the progress bar only on the main process\n+ \"\"\"\n+ if not is_tqdm_available():\n+ raise ImportError(\"Accelerate's `tqdm` module requires `tqdm` to be installed. 
Please run `pip install tqdm`.\")\n+ disable = False\n+ if main_process_only:\n+ disable = AcceleratorState().local_process_index == 0\n+ return _tqdm(*args, **kwargs, disable=disable)\n", "code_comments": [ { "body": "Let's make tqdm a soft dependency.", "diff_hunk": "@@ -50,7 +50,7 @@\n ]\n },\n python_requires=\">=3.7.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\", \"tqdm\"],", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/564", "pr_id": 1007414023 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 60b6647fd..cad067ad5 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\", \"hf-doc-builder >= 0.3.0\"]\n extras[\"docs\"] = []\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\n-extras[\"test_dev\"] = [\"datasets<=2.2.2\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed\"]\n+extras[\"test_dev\"] = [\"datasets\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed\"]\n extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\n \n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/563", "pr_id": 1007337710 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 514e77d05..9ee840440 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -27,8 +27,10 @@\n \n import torch\n \n+import psutil\n from accelerate.commands.config import default_config_file, load_config_from_file\n from accelerate.commands.config.config_args import SageMakerConfig\n+from accelerate.state import get_int_from_env\n from accelerate.utils import (\n ComputeEnvironment,\n DistributedType,\n@@ -273,7 +275,7 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--num_cpu_threads_per_process\",\n type=int,\n- default=1,\n+ default=None,\n help=\"The number of CPU threads per process. 
Can be tuned for optimal performance.\",\n )\n parser.add_argument(\n@@ -337,6 +339,17 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ if args.num_cpu_threads_per_process is None:\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n+ if args.num_cpu_threads_per_process == 0:\n+ args.num_cpu_threads_per_process = 1\n+ logger.info(\n+ f\"num_cpu_threads_per_process unset, we set it at {args.num_cpu_threads_per_process} to improve oob performance.\"\n+ )\n+\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n \n process = subprocess.Popen(cmd, env=current_env)\n@@ -434,7 +447,9 @@ def multi_gpu_launcher(args):\n current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\n if args.fsdp_state_dict_type is not None:\n current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n-\n+ if args.num_cpu_threads_per_process is None:\n+ args.num_cpu_threads_per_process = 1\n+ logger.info(f\"num_cpu_threads_per_process unset, we set it at {args.num_cpu_threads_per_process}.\")\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n", "code_comments": [ { "body": "We shouldn't change this value if the user has set it to 1. Maybe change the default of `args.num_cpu_threads_per_process` to `None` and then set it to the right default if it's unset.", "diff_hunk": "@@ -307,6 +309,20 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ if args.num_cpu_threads_per_process == 1:", "from_author": false }, { "body": "Adapt the warning to the behavior suggested above (no default was provided, we picked xxx). 
Also use f-strings please.", "diff_hunk": "@@ -307,6 +309,20 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ if args.num_cpu_threads_per_process == 1:\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n+ if args.num_cpu_threads_per_process == 0:\n+ args.num_cpu_threads_per_process = 1\n+ if args.num_cpu_threads_per_process != 1:\n+ logger.warn(\n+ 'num_cpu_threads_per_process is reset to %d instead of default 1 to improve oob performance.\\\n+ \\nyou could tun num_cpu_threads_per_process by pass --num_cpu_threads_per_process xxx to \"accelerate launch \"\\n'\n+ % args.num_cpu_threads_per_process", "from_author": false }, { "body": "apply you suggestion @sgugger ", "diff_hunk": "@@ -307,6 +309,20 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ if args.num_cpu_threads_per_process == 1:\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n+ if args.num_cpu_threads_per_process == 0:\n+ args.num_cpu_threads_per_process = 1\n+ if args.num_cpu_threads_per_process != 1:\n+ logger.warn(\n+ 'num_cpu_threads_per_process is reset to %d instead of default 1 to improve oob performance.\\\n+ \\nyou could tun num_cpu_threads_per_process by pass --num_cpu_threads_per_process xxx to \"accelerate launch \"\\n'\n+ % args.num_cpu_threads_per_process", "from_author": true }, { "body": "```suggestion\r\n f\"num_cpu_threads_per_process unset, we set it at {args.num_cpu_threads_per_process} to improve oob performance.\"\r\n```", "diff_hunk": "@@ -337,6 +339,17 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ if args.num_cpu_threads_per_process is None:\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n+ if args.num_cpu_threads_per_process == 0:\n+ args.num_cpu_threads_per_process = 1\n+ logger.warn(\n+ f\"num_cpu_threads_per_process unset, we pick as {args.num_cpu_threads_per_process} to improve oob performance\"", "from_author": false }, { "body": "```suggestion\r\n logger.info(\r\n```\r\nMaybe the info level is enough?", "diff_hunk": "@@ -337,6 +339,17 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ if args.num_cpu_threads_per_process is None:\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n+ if args.num_cpu_threads_per_process == 0:\n+ args.num_cpu_threads_per_process = 1\n+ logger.warn(", "from_author": false }, { "body": "```suggestion\r\n logger.info(f\"num_cpu_threads_per_process unset, we set it at {args.num_cpu_threads_per_process}.\")\r\n```", "diff_hunk": "@@ -434,7 +447,9 @@ def multi_gpu_launcher(args):\n current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\n if args.fsdp_state_dict_type is not None:\n 
current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n-\n+ if args.num_cpu_threads_per_process is None:\n+ args.num_cpu_threads_per_process = 1\n+ logger.warn(f\"num_cpu_threads_per_process unset, we pick as {args.num_cpu_threads_per_process}\")", "from_author": false }, { "body": "change to info", "diff_hunk": "@@ -337,6 +339,17 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ if args.num_cpu_threads_per_process is None:\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ args.num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n+ if args.num_cpu_threads_per_process == 0:\n+ args.num_cpu_threads_per_process = 1\n+ logger.warn(", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@sgugger @yao-matrix @liangan1 please help review it", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/562", "pr_id": 1006853747 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 35afd6c5d..5aea696b4 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -446,16 +446,13 @@ def _prepare_one(self, obj, first_pass=False):\n if isinstance(obj, torch.utils.data.DataLoader):\n return self.prepare_data_loader(obj)\n elif isinstance(obj, torch.nn.Module):\n- self._models.append(obj)\n return self.prepare_model(obj)\n elif isinstance(obj, torch.optim.Optimizer):\n optimizer = self.prepare_optimizer(obj)\n- self._optimizers.append(optimizer)\n return optimizer\n # Second pass of preparation: LR scheduler (which need the full list of optimizers)\n elif isinstance(obj, torch.optim.lr_scheduler._LRScheduler):\n scheduler = self.prepare_scheduler(obj)\n- self._schedulers.append(scheduler)\n return scheduler\n # Return the unprocessed object if previous criteria was not met\n return obj\n@@ -570,6 +567,7 @@ def prepare(self, *args):\n return result if len(result) > 1 else result[0]\n \n def prepare_model(self, model):\n+ self._models.append(model)\n if self.device_placement and self.distributed_type != DistributedType.FSDP:\n model = model.to(self.device)\n if self.distributed_type == DistributedType.MULTI_GPU:\n@@ -782,7 +780,9 @@ def prepare_data_loader(self, data_loader):\n )\n \n def prepare_optimizer(self, optimizer):\n- return AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\n+ optimizer = AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\n+ self._optimizers.append(optimizer)\n+ return optimizer\n \n def prepare_scheduler(self, scheduler):\n # We try to find the optimizer associated with `scheduler`, the default is the full list.\n@@ -791,13 +791,14 @@ def prepare_scheduler(self, scheduler):\n if getattr(scheduler, \"optimizer\", None) == opt.optimizer:\n optimizer = opt\n break\n-\n- return AcceleratedScheduler(\n+ scheduler = AcceleratedScheduler(\n scheduler,\n optimizer,\n step_with_optimizer=self.step_scheduler_with_optimizer,\n split_batches=self.split_batches,\n )\n+ self._schedulers.append(scheduler)\n+ return scheduler\n \n def backward(self, loss, **kwargs):\n \"\"\"\n", "code_comments": [], "context": [ { "body": "Thanks! If we want to go this route, it would be good to update the rest of them as well. 
I.e. optimizer and scheduler", "from_author": false }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@muellerzr thanks for the comment, I updated the optimizer and scheduler as well. If any other changes are needed, please let me know.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/556", "pr_id": 1005987275 }, { "diff": "diff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex a660db858..8bffd8e99 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -29,10 +29,13 @@\n \n try:\n import torch_ccl # noqa: F401\n-\n _ccl_available = True\n except ImportError:\n- _ccl_available = False\n+ try: \n+ import oneccl_bindings_for_pytorch\n+ _ccl_available = True\n+ except ImportError:\n+ _ccl_available = False\n \n \n try:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/554", "pr_id": 1005929556 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 8d4138651..e6a23e46d 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -16,6 +16,7 @@\n \n import argparse\n import importlib\n+import logging\n import os\n import subprocess\n import sys\n@@ -24,6 +25,8 @@\n from pathlib import Path\n from typing import Dict, List\n \n+import torch\n+\n from accelerate.commands.config import default_config_file, load_config_from_file\n from accelerate.commands.config.config_args import SageMakerConfig\n from accelerate.utils import (\n@@ -40,6 +43,9 @@\n from accelerate.utils.dataclasses import SageMakerDistributedType\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n def launch_command_parser(subparsers=None):\n if subparsers is not None:\n parser = subparsers.add_parser(\"launch\")\n@@ -659,6 +665,7 @@ def launch_command(args):\n raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`, `--use_fsdp`.\")\n \n defaults = None\n+ warned = []\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n@@ -684,7 +691,6 @@ def launch_command(args):\n and getattr(args, name, None) is None\n ):\n setattr(args, name, attr)\n-\n if not args.mixed_precision:\n if args.fp16:\n args.mixed_precision = \"fp16\"\n@@ -692,7 +698,32 @@ def launch_command(args):\n args.mixed_precision = defaults.mixed_precision\n else:\n if args.num_processes is None:\n+ warned.append(\"\\t`--num_processes` was set to a value of `1`\")\n args.num_processes = 1\n+ if args.num_machines is None:\n+ warned.append(\"\\t`--num_machines` was set to a value of `1`\")\n+ args.num_machines = 1\n+ if args.mixed_precision is None:\n+ warned.append(\"\\t`--mixed_precision` was set to a value of `'no'`\")\n+ args.mixed_precision = \"no\"\n+ if not hasattr(args, \"use_cpu\"):\n+ args.use_cpu = args.cpu\n+ if args.multi_gpu and args.num_processes == 1:\n+ args.num_processes = torch.cuda.device_count()\n+ if not any(\"--num_processes\" in warn for warn in warned):\n+ warned.append(f\"\\t`--num_processes` was set to `{args.num_processes}`\")\n+ else:\n+ for i, warn in enumerate(warned):\n+ if \"--num_processes\" in warn:\n+ warned[i] = warn.replace(\"`1`\", f\"`{args.num_processes}`\")\n+\n+ if any(warned):\n+ message = \"The following values were not passed to `accelerate 
launch` and had defaults used instead:\\n\"\n+ message += \"\\n\".join(warned)\n+ message += (\n+ \"\\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.\"\n+ )\n+ logger.warn(message)\n \n # Use the proper launcher\n if args.use_deepspeed and not args.cpu:\n", "code_comments": [ { "body": "```suggestion\r\n logger.warn(\"To avoid these warnings pass in values for each of the problematic parameters or run `accelerate config`.\")\r\n```", "diff_hunk": "@@ -692,16 +696,26 @@ def launch_command(args):\n else:\n args.mixed_precision = defaults.mixed_precision\n else:\n+ warned = False\n if args.num_processes is None:\n+ logger.warn(\"`--num_processes` was not set, using a value of `1`.\")\n+ warned = True\n args.num_processes = 1\n if args.num_machines is None:\n+ warned = True\n+ logger.warn(\"`--num_machines` was not set, using a value of `1`.\")\n args.num_machines = 1\n if args.mixed_precision is None:\n+ warned = True\n+ logger.warn(\"`--mixed_precision` was not set, using a value of `'no'`.\")\n args.mixed_precision = \"no\"\n if not hasattr(args, \"use_cpu\"):\n- args.use_cpu = torch.cuda.is_available()\n+ args.use_cpu = args.cpu\n+ if warned:\n+ logger.warn(\"To avoid these warnings pass in values for each of the problematic parameters\")", "from_author": false }, { "body": "This one should have the text above to avoid warning (which you can put in a constant).", "diff_hunk": "@@ -692,16 +696,26 @@ def launch_command(args):\n else:\n args.mixed_precision = defaults.mixed_precision\n else:\n+ warned = False\n if args.num_processes is None:\n+ logger.warn(\"`--num_processes` was not set, using a value of `1`.\")\n+ warned = True\n args.num_processes = 1\n if args.num_machines is None:\n+ warned = True\n+ logger.warn(\"`--num_machines` was not set, using a value of `1`.\")\n args.num_machines = 1\n if args.mixed_precision is None:\n+ warned = True\n+ logger.warn(\"`--mixed_precision` was not set, using a value of `'no'`.\")\n args.mixed_precision = \"no\"\n if not hasattr(args, \"use_cpu\"):\n- args.use_cpu = torch.cuda.is_available()\n+ args.use_cpu = args.cpu\n+ if warned:\n+ logger.warn(\"To avoid these warnings pass in values for each of the problematic parameters\")\n \n if args.multi_gpu and args.num_processes == 1:\n+ logger.warn(\"`--multi_gpu` was passed but `num_processes` was not set. 
Automatically using all available GPUs\")", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/553", "pr_id": 1005729256 }, { "diff": "diff --git a/docs/source/accelerator.mdx b/docs/source/accelerator.mdx\nindex 5202aeb8c..268b09d1f 100644\n--- a/docs/source/accelerator.mdx\n+++ b/docs/source/accelerator.mdx\n@@ -44,11 +44,12 @@ actually be performed, and auto-scale the loss:\n \n ```python\n accelerator = Accelerator(gradient_accumulation_steps=2)\n+model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)\n \n-for (input, label) in enumerate(training_dataloader):\n+for input, label in training_dataloader:\n with accelerator.accumulate(model):\n predictions = model(input)\n- loss = loss_function(predictions, labels)\n+ loss = loss_function(predictions, label)\n accelerator.backward(loss)\n optimizer.step()\n scheduler.step()\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@sgugger Thanks, I applied the change.", "from_author": true }, { "body": "Thanks a lot!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/551", "pr_id": 1005702474 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex c0871b28c..fe44d5f61 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -12,7 +12,9 @@ jobs:\n strategy:\n matrix:\n test-kind: [\n- test,\n+ test_prod,\n+ test_core,\n+ test_big_modeling,\n test_deepspeed,\n test_example_differences,\n test_checkpoint_step,\n@@ -37,8 +39,9 @@ jobs:\n - name: Install the library\n run: |\n pip install --upgrade pip\n- pip install -e .[test,test_trackers]\n- if [ ${{ matrix.test-kind }} = test_rest ]; then pip uninstall comet_ml -y; fi\n+ if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi\n+ if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[test,test_trackers]; fi\n+ if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi\n \n - name: Run Tests\n run: |\ndiff --git a/Makefile b/Makefile\nindex 6dab6af11..4104f1bba 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -27,6 +27,12 @@ style:\n test:\n \tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py\n \n+test_big_modeling:\n+\tpython -m pytest -s -v ./tests/test_big_modeling.py\n+\n+test_core:\n+\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py\n+\n test_deepspeed:\n \tpython -m pytest -s -v ./tests/deepspeed\n \n@@ -43,5 +49,9 @@ test_checkpoint_epoch:\n test_checkpoint_step:\n \tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\"\n \n+# Same as test but used to install only the base dependencies\n+test_prod:\n+\t$(MAKE) test_core\n+\n test_rest:\n \tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\"\n\\ No newline at end of file\ndiff --git a/setup.py b/setup.py\nindex 77679a51e..dedd077f3 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -18,18 +18,9 @@\n extras = {}\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\", \"hf-doc-builder >= 0.3.0\"]\n extras[\"docs\"] = []\n-extras[\"test\"] = [\n- \"pytest\",\n- 
\"pytest-xdist\",\n- \"pytest-subtests\",\n- \"datasets<=2.2.2\",\n- \"evaluate\",\n- \"transformers\",\n- \"scipy\",\n- \"sklearn\",\n- \"parameterized\",\n- \"deepspeed\",\n-]\n+extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\n+extras[\"test_dev\"] = [\"datasets<=2.2.2\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed\"]\n+extras[\"test\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\n \n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\n extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\ndiff --git a/src/accelerate/test_utils/scripts/test_metrics.py b/src/accelerate/test_utils/scripts/test_metrics.py\nindex 442053213..c426d1fc5 100644\n--- a/src/accelerate/test_utils/scripts/test_metrics.py\n+++ b/src/accelerate/test_utils/scripts/test_metrics.py\n@@ -17,7 +17,6 @@\n import torch\n from torch.utils.data import DataLoader\n \n-import evaluate\n from accelerate import Accelerator\n from accelerate.test_utils import RegressionDataset, RegressionModel\n from accelerate.utils import set_seed\n@@ -61,26 +60,6 @@ def test_torch_metrics():\n assert torch.allclose(accuracy_single, accuracy_multi), \"The two accuracies were not the same!\"\n \n \n-def test_evaluate_metrics():\n- metric = evaluate.load(\"accuracy\")\n- accelerator = Accelerator()\n- model, ddp_model, dataloader = get_setup(accelerator)\n- for batch in dataloader:\n- ddp_input, ddp_target = batch.values()\n- # First do single process\n- input, target = accelerator.gather((ddp_input, ddp_target))\n- input, target = input.to(accelerator.device), target.to(accelerator.device)\n- with torch.no_grad():\n- logits = model(input)\n- accuracy_single = metric.compute(logits, target)\n- # Then do multiprocess\n- with torch.no_grad():\n- logits = ddp_model(ddp_input)\n- logits, target = accelerator.gather_for_metrics((logits, ddp_target), dataloader)\n- accuracy_multi = metric.compute(logits, target)\n- assert torch.allclose(accuracy_single, accuracy_multi), \"The two accuracies were not the same!\"\n-\n-\n def main():\n accelerator = Accelerator()\n state = accelerator.state\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/547", "pr_id": 1004464845 }, { "diff": "diff --git a/.github/workflows/on-merge.yml b/.github/workflows/on-merge.yml\nindex 59677b34e..028b99d9a 100644\n--- a/.github/workflows/on-merge.yml\n+++ b/.github/workflows/on-merge.yml\n@@ -25,7 +25,7 @@ jobs:\n source activate accelerate\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }}\n- pip install -e .[test,test_trackers]\n+ pip install -e .[testing,test_trackers]\n \n - name: Run test on GPUs\n run: |\n@@ -52,7 +52,7 @@ jobs:\n source activate accelerate\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }}\n- pip install -e .[test,test_trackers]\n+ pip install -e .[testing,test_trackers]\n \n - name: Run test on GPUs\n run: |\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex fe44d5f61..09330eb63 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -40,7 +40,7 @@ jobs:\n run: |\n pip install --upgrade pip\n if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi\n- if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e 
.[test,test_trackers]; fi\n+ if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi\n if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi\n \n - name: Run Tests\ndiff --git a/docker/accelerate-cpu/Dockerfile b/docker/accelerate-cpu/Dockerfile\nindex 5d1286772..a872e6fb2 100644\n--- a/docker/accelerate-cpu/Dockerfile\n+++ b/docker/accelerate-cpu/Dockerfile\n@@ -21,7 +21,7 @@ WORKDIR /workspace\n RUN python3 -m pip install --upgrade --no-cache-dir pip\n RUN python3 -m pip install --no-cache-dir \\\n jupyter \\\n- git+https://github.com/huggingface/accelerate#egg=accelerate[test,test_trackers] \\\n+ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \\\n --extra-index-url https://download.pytorch.org/whl/cpu\n \n # Stage 2\ndiff --git a/docker/accelerate-gpu/Dockerfile b/docker/accelerate-gpu/Dockerfile\nindex 746e65350..44aa94b29 100644\n--- a/docker/accelerate-gpu/Dockerfile\n+++ b/docker/accelerate-gpu/Dockerfile\n@@ -22,7 +22,7 @@ SHELL [\"/bin/bash\", \"-c\"]\n # Activate the conda env and install torch + accelerate\n RUN source activate accelerate && \\\n python3 -m pip install --no-cache-dir \\\n- git+https://github.com/huggingface/accelerate#egg=accelerate[test,test_trackers] \\\n+ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \\\n --extra-index-url https://download.pytorch.org/whl/cu113\n \n # Stage 2\ndiff --git a/setup.py b/setup.py\nindex dedd077f3..60b6647fd 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -20,10 +20,10 @@\n extras[\"docs\"] = []\n extras[\"test_prod\"] = [\"pytest\", \"pytest-xdist\", \"pytest-subtests\", \"parameterized\"]\n extras[\"test_dev\"] = [\"datasets<=2.2.2\", \"evaluate\", \"transformers\", \"scipy\", \"sklearn\", \"deepspeed\"]\n-extras[\"test\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\n+extras[\"testing\"] = extras[\"test_prod\"] + extras[\"test_dev\"]\n \n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\n-extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n+extras[\"dev\"] = extras[\"quality\"] + extras[\"testing\"]\n \n extras[\"sagemaker\"] = [\n \"sagemaker\", # boto3 is a required package in sagemaker\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/545", "pr_id": 1004390651 }, { "diff": "diff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 6d3e7d3ff..c846dd65d 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -130,8 +130,16 @@ def __init__(\n ), \"DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source\"\n self.distributed_type = DistributedType.DEEPSPEED\n if not torch.distributed.is_initialized():\n- torch.distributed.init_process_group(backend=\"nccl\", **kwargs)\n+ from .utils import compare_versions\n+\n self.backend = \"nccl\"\n+ if compare_versions(\"deepspeed\", \">\", \"0.6.5\"):\n+ from deepspeed import comm as dist\n+\n+ dist.init_distributed(dist_backend=self.backend)\n+ else:\n+ torch.distributed.init_process_group(backend=\"nccl\", **kwargs)\n+\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n", "code_comments": [ { "body": "```suggestion\r\n \"deepspeed<=0.6.5\",\r\n```\r\nI don't think 
we required 0.6.5 as a min version before, so let's not pin exactly.", "diff_hunk": "@@ -28,7 +28,7 @@\n \"scipy\",\n \"sklearn\",\n \"parameterized\",\n- \"deepspeed\",\n+ \"deepspeed==0.6.5\",", "from_author": false }, { "body": "This can be imported at the top.", "diff_hunk": "@@ -130,8 +130,16 @@ def __init__(\n ), \"DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source\"\n self.distributed_type = DistributedType.DEEPSPEED\n if not torch.distributed.is_initialized():\n- torch.distributed.init_process_group(backend=\"nccl\", **kwargs)\n+ from .utils import compare_versions", "from_author": false }, { "body": "that was leading to circular dependency error\r\n", "diff_hunk": "@@ -130,8 +130,16 @@ def __init__(\n ), \"DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source\"\n self.distributed_type = DistributedType.DEEPSPEED\n if not torch.distributed.is_initialized():\n- torch.distributed.init_process_group(backend=\"nccl\", **kwargs)\n+ from .utils import compare_versions", "from_author": true }, { "body": "Ah yes, I can see that! Good for me then!", "diff_hunk": "@@ -130,8 +130,16 @@ def __init__(\n ), \"DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source\"\n self.distributed_type = DistributedType.DEEPSPEED\n if not torch.distributed.is_initialized():\n- torch.distributed.init_process_group(backend=\"nccl\", **kwargs)\n+ from .utils import compare_versions", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/544", "pr_id": 1004131975 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 3cc1059e1..cc4a67571 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -927,7 +927,7 @@ def wait_for_everyone(self):\n \"\"\"\n wait_for_everyone()\n \n- def init_trackers(self, project_name: str, config: Optional[dict] = None):\n+ def init_trackers(self, project_name: str, config: Optional[dict] = None, init_kwargs: Optional[dict] = {}):\n \"\"\"\n Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations\n \n@@ -936,6 +936,12 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n The name of the project. All trackers will save their data based on this\n config (`dict`, *optional*):\n Optional starting configuration to be logged.\n+ init_kwargs (`dict`, *optional*):\n+ A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. 
Should be\n+ formatted like this:\n+ ```python\n+ {\"wandb\": {\"tags\": [\"tag_a\", \"tag_b\"]}}\n+ ```\n \"\"\"\n self.trackers = []\n for tracker in self.log_with:\n@@ -946,14 +952,16 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)]\n if getattr(tracker_init, \"requires_logging_directory\"):\n # We can skip this check since it was done in `__init__`\n- self.trackers.append(tracker_init(project_name, self.logging_dir))\n+ self.trackers.append(\n+ tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {}))\n+ )\n else:\n- self.trackers.append(tracker_init(project_name))\n+ self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {})))\n if config is not None:\n for tracker in self.trackers:\n tracker.store_init_configuration(config)\n \n- def log(self, values: dict, step: Optional[int] = None):\n+ def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dict] = {}):\n \"\"\"\n Logs `values` to all stored trackers in `self.trackers`.\n \n@@ -962,10 +970,16 @@ def log(self, values: dict, step: Optional[int] = None):\n Values should be a dictionary-like object containing only types `int`, `float`, or `str`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n+ log_kwargs (`dict`, *optional*):\n+ A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted\n+ like this:\n+ ```python\n+ {\"wandb\": {\"tags\": [\"tag_a\", \"tag_b\"]}}\n+ ```\n \"\"\"\n if self.is_main_process:\n for tracker in self.trackers:\n- tracker.log(values, step=step)\n+ tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))\n \n def end_training(self):\n \"\"\"\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 3efff77c9..b89630a4e 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -52,8 +52,16 @@ def get_available_trackers():\n class GeneralTracker(object, metaclass=ABCMeta):\n \"\"\"\n A base Tracker class to be used for all logging integration implementations.\n+\n+ Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to\n+ [`Accelerator`]\n \"\"\"\n \n+ @abstractproperty\n+ def name(self):\n+ \"String representation of the python class name\"\n+ pass\n+\n @abstractproperty\n def requires_logging_directory(self):\n \"\"\"\n@@ -75,7 +83,7 @@ def store_init_configuration(self, values: dict):\n pass\n \n @abstractmethod\n- def log(self, values: dict, step: Optional[int]):\n+ def log(self, values: dict, step: Optional[int], **kwargs):\n \"\"\"\n Logs `values` to the current run. 
Base `log` implementations of a tracking API should go in here, along with\n special behavior for the `step parameter.\n@@ -105,14 +113,17 @@ class TensorBoardTracker(GeneralTracker):\n The name of the experiment run\n logging_dir (`str`, `os.PathLike`):\n Location for TensorBoard logs to be stored.\n+ kwargs:\n+ Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method.\n \"\"\"\n \n+ name = \"tensorboard\"\n requires_logging_directory = True\n \n- def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]]):\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]], **kwargs):\n self.run_name = run_name\n self.logging_dir = os.path.join(logging_dir, run_name)\n- self.writer = tensorboard.SummaryWriter(self.logging_dir)\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)\n logger.info(f\"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}\")\n logger.info(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n@@ -131,7 +142,7 @@ def store_init_configuration(self, values: dict):\n self.writer.flush()\n logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n \n- def log(self, values: dict, step: Optional[int] = None):\n+ def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `values` to the current run.\n \n@@ -141,14 +152,17 @@ def log(self, values: dict, step: Optional[int] = None):\n `str` to `float`/`int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n+ kwargs:\n+ Additional key word arguments passed along to either `SummaryWriter.add_scaler`,\n+ `SummaryWriter.add_text`, or `SummaryWriter.add_scalers` method based on the contents of `values`.\n \"\"\"\n for k, v in values.items():\n if isinstance(v, (int, float)):\n- self.writer.add_scalar(k, v, global_step=step)\n+ self.writer.add_scalar(k, v, global_step=step, **kwargs)\n elif isinstance(v, str):\n- self.writer.add_text(k, v, global_step=step)\n+ self.writer.add_text(k, v, global_step=step, **kwargs)\n elif isinstance(v, dict):\n- self.writer.add_scalars(k, v, global_step=step)\n+ self.writer.add_scalars(k, v, global_step=step, **kwargs)\n self.writer.flush()\n logger.info(\"Successfully logged to TensorBoard\")\n \n@@ -167,13 +181,16 @@ class WandBTracker(GeneralTracker):\n Args:\n run_name (`str`):\n The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `wandb.init` method.\n \"\"\"\n \n+ name = \"wandb\"\n requires_logging_directory = False\n \n- def __init__(self, run_name: str):\n+ def __init__(self, run_name: str, **kwargs):\n self.run_name = run_name\n- self.run = wandb.init(project=self.run_name)\n+ self.run = wandb.init(project=self.run_name, **kwargs)\n logger.info(f\"Initialized WandB project {self.run_name}\")\n logger.info(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n@@ -191,7 +208,7 @@ def store_init_configuration(self, values: dict):\n wandb.config.update(values)\n logger.info(\"Stored initial configuration hyperparameters to WandB\")\n \n- def log(self, values: dict, step: Optional[int] = None):\n+ def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `values` to the current run.\n \n@@ -201,8 +218,10 @@ def log(self, values: dict, step: Optional[int] = None):\n `str` to `float`/`int`.\n step (`int`, 
*optional*):\n The run step. If included, the log will be affiliated with this step.\n+ kwargs:\n+ Additional key word arguments passed along to the `wandb.log` method.\n \"\"\"\n- self.run.log(values, step=step)\n+ self.run.log(values, step=step, **kwargs)\n logger.info(\"Successfully logged to WandB\")\n \n def finish(self):\n@@ -222,13 +241,16 @@ class CometMLTracker(GeneralTracker):\n Args:\n run_name (`str`):\n The name of the experiment run.\n+ kwargs:\n+ Additional key word arguments passed along to the `Experiment.__init__` method.\n \"\"\"\n \n+ name = \"comet_ml\"\n requires_logging_directory = False\n \n- def __init__(self, run_name: str):\n+ def __init__(self, run_name: str, **kwargs):\n self.run_name = run_name\n- self.writer = Experiment(project_name=run_name)\n+ self.writer = Experiment(project_name=run_name, **kwargs)\n logger.info(f\"Initialized CometML project {self.run_name}\")\n logger.info(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n@@ -246,7 +268,7 @@ def store_init_configuration(self, values: dict):\n self.writer.log_parameters(values)\n logger.info(\"Stored initial configuration hyperparameters to CometML\")\n \n- def log(self, values: dict, step: Optional[int] = None):\n+ def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `values` to the current run.\n \n@@ -256,16 +278,19 @@ def log(self, values: dict, step: Optional[int] = None):\n `str` to `float`/`int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n+ kwargs:\n+ Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`,\n+ or `Experiment.log_metrics` method based on the contents of `values`.\n \"\"\"\n if step is not None:\n self.writer.set_step(step)\n for k, v in values.items():\n if isinstance(v, (int, float)):\n- self.writer.log_metric(k, v, step=step)\n+ self.writer.log_metric(k, v, step=step, **kwargs)\n elif isinstance(v, str):\n- self.writer.log_other(k, v)\n+ self.writer.log_other(k, v, **kwargs)\n elif isinstance(v, dict):\n- self.writer.log_metrics(v, step=step)\n+ self.writer.log_metrics(v, step=step, **kwargs)\n logger.info(\"Successfully logged to CometML\")\n \n def finish(self):\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex a37ae7128..c32502796 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -100,7 +100,8 @@ def test_init_trackers(self):\n project_name = \"test_project_with_config\"\n accelerator = Accelerator(log_with=\"wandb\")\n config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n- accelerator.init_trackers(project_name, config)\n+ kwargs = {\"wandb\": {\"tags\": [\"my_tag\"]}}\n+ accelerator.init_trackers(project_name, config, kwargs)\n accelerator.end_training()\n # The latest offline log is stored at wandb/latest-run/*.wandb\n for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\n@@ -116,6 +117,7 @@ def test_init_trackers(self):\n self.assertEqual(self.get_value_from_log(\"learning_rate\", cleaned_log), \"0.01\")\n self.assertEqual(self.get_value_from_log(\"some_boolean\", cleaned_log), \"false\")\n self.assertEqual(self.get_value_from_log(\"some_string\", cleaned_log), \"some_value\")\n+ self.assertIn(\"my_tag\", cleaned_log)\n \n def test_log(self):\n project_name = \"test_project_with_log\"\n@@ -214,6 +216,7 @@ class MyCustomTracker(GeneralTracker):\n \"some_string\",\n ]\n 
\n+ name = \"my_custom_tracker\"\n requires_logging_directory = False\n \n def __init__(self, dir: str):\n", "code_comments": [ { "body": "```suggestion\r\n formatted like this:\r\n```", "diff_hunk": "@@ -936,6 +936,12 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n The name of the project. All trackers will save their data based on this\n config (`dict`, *optional*):\n Optional starting configuration to be logged.\n+ init_kwargs (`dict`, *optional*):\n+ A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be\n+ formatted as {tracker_name:{kwarg_a:value_a}}, such as:", "from_author": false }, { "body": "```suggestion\r\n like this:\r\n```", "diff_hunk": "@@ -962,10 +970,16 @@ def log(self, values: dict, step: Optional[int] = None):\n Values should be a dictionary-like object containing only types `int`, `float`, or `str`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n+ log_kwargs (`dict`, *optional*):\n+ A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted\n+ as {tracker_name:{kwarg_a:value_a}}, such as:", "from_author": false }, { "body": "```suggestion\r\n Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to [`Accelerator`]\r\n```", "diff_hunk": "@@ -52,8 +52,15 @@ def get_available_trackers():\n class GeneralTracker(object, metaclass=ABCMeta):\n \"\"\"\n A base Tracker class to be used for all logging integration implementations.\n+\n+ Each function should take in `**kwargs` that will automatically be passed in from a base dictionary in Accelerator", "from_author": false }, { "body": "```suggestion\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n```", "diff_hunk": "@@ -8,7 +8,7 @@\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY name, either express or implied.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/542", "pr_id": 1002954871 }, { "diff": "diff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex 2e9cbe203..4b144ba05 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -120,14 +120,13 @@ validation_dataloader = accelerator.prepare(validation_dataloader)\n \n As for your training dataloader, it will mean that (should you run your script on multiple devices) each device will\n only see part of the evaluation data. This means you will need to group your predictions together. 
This is very easy to\n-do with the [`~Accelerator.gather`] method.\n+do with the [`~Accelerator.gather_for_metrics`] method.\n \n ```python\n for inputs, targets in validation_dataloader:\n predictions = model(inputs)\n # Gather all predictions and targets\n- all_predictions = accelerator.gather(predictions)\n- all_targets = accelerator.gather(targets)\n+ all_predictions, all_targets = accelerator.gather_for_metrics((predictions, targets))\n # Example of use with a *Datasets.Metric*\n metric.add_batch(all_predictions, all_targets)\n ```\n@@ -141,11 +140,17 @@ As for the training dataloader, passing your validation dataloader through\n Any instruction using your training dataloader length (for instance if you need the number of total training steps\n to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].\n \n+As some data at the end of the dataset may be duplicated so the batch can divide equally to all workers, metrics should be \n+calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data.\n+\n+If for some reason you don't wish to have this automatically done, [`~Accelerator.gather`] can be used instead to gather \n+the data across all processes and this can manually be done instead.\n+\n </Tip>\n \n <Tip warning={true}>\n \n-The [`~Accelerator.gather`] method requires the tensors to be all the same size on each process. If\n+The [`~Accelerator.gather`] and [`~Accelerator.gather_for_metrics`] methods require the tensors to be all the same size on each process. If\n you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in\n a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad you tensor to the\n biggest size across processes.\ndiff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 6880f4626..1b21ebed7 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -238,7 +238,7 @@ def training_function(config, args):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex 95af2a6c9..79a207fa8 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -207,7 +207,9 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather_for_metrics(\n+ (predictions, batch[\"labels\"]), eval_dataloader\n+ )\n metric.add_batch(\n predictions=predictions,\n references=references,\n@@ -226,7 +228,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), test_dataloader)\n fold_predictions.append(predictions.cpu())\n if i == 0:\n # We 
need all of the test predictions\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 8da906ee9..8e3b9e130 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -270,7 +270,6 @@ def collate_fn(examples):\n # context manager to track the peak memory usage during the evaluation\n with TorchTracemalloc() as tracemalloc:\n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n@@ -278,15 +277,9 @@ def collate_fn(examples):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather(\n- (predictions, batch[\"labels\"])\n- ) # If we are in a multiprocess environment, the last batch has duplicates\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics(\n+ (predictions, batch[\"labels\"]), eval_dataloader\n+ )\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex 403587f10..d95f9bef3 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -171,7 +171,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 65feed51c..b11c80fd0 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -183,7 +183,9 @@ def inner_training_loop(batch_size):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather_for_metrics(\n+ (predictions, batch[\"labels\"]), eval_dataloader\n+ )\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex 741e62b8c..dc47f3db6 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -187,6 +187,8 @@ def training_function(config, args):\n else:\n # Otherwise we add the number of samples seen\n samples_seen += references.shape[0]\n+ # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:\n+ # accelerator.gather_for_metrics((predictions, references), eval_dataloader)\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 3278a913a..78d4af422 100644\n--- 
a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -197,7 +197,7 @@ def training_function(config, args):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 6e414bc9c..8f893b7c4 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -233,7 +233,6 @@ def training_function(config, args):\n accelerator.save_state(output_dir)\n model.eval()\n accurate = 0\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n@@ -241,19 +240,11 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- predictions, references = accelerator.gather((predictions, batch[\"label\"]))\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- predictions = predictions[: len(eval_dataloader) - samples_seen]\n- references = references[: len(eval_dataloader) - samples_seen]\n- else:\n- samples_seen += references.shape[0]\n- else:\n- samples_seen += references.shape[0]\n- accurate_preds = predictions == references\n+ predictions, labels = accelerator.gather_for_metrics((predictions, batch[\"label\"]), eval_dataloader)\n+ accurate_preds = predictions == labels\n accurate += accurate_preds.long().sum()\n \n- eval_metric = accurate.item() / samples_seen\n+ eval_metric = accurate.item() / accelerator.gradient_state.samples_seen\n # Use accelerator.print to print only on the main process.\n accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n if args.with_tracking:\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 6e6aec0ff..572bc9a23 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -211,7 +211,6 @@ def collate_fn(examples):\n accelerator.save_state(output_dir)\n \n model.eval()\n- samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n@@ -219,15 +218,7 @@ def collate_fn(examples):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather(\n- (predictions, batch[\"labels\"])\n- ) # If we are in a multiprocess environment, the last batch has duplicates\n- if accelerator.use_distributed:\n- if step == len(eval_dataloader) - 1:\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex 483bddac3..aa97ce2d7 100644\n--- a/examples/cv_example.py\n+++ 
b/examples/cv_example.py\n@@ -173,7 +173,8 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n+ predictions, labels = accelerator.gather_for_metrics((predictions, batch[\"label\"]), eval_dataloader)\n+ accurate_preds = predictions == labels\n num_elems += accurate_preds.shape[0]\n accurate += accurate_preds.long().sum()\n \ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 55dedae5e..d64ab3bb7 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -158,7 +158,7 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather_for_metrics((predictions, batch[\"labels\"]), eval_dataloader)\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 3cc1059e1..6a2aa9703 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -51,6 +51,7 @@\n is_torch_version,\n is_tpu_available,\n pad_across_processes,\n+ recursively_apply,\n reduce,\n save,\n wait_for_everyone,\n@@ -874,6 +875,35 @@ def gather(self, tensor):\n \"\"\"\n return gather(tensor)\n \n+ def gather_for_metrics(self, tensor, dataloader):\n+ \"\"\"\n+ Gathers `tensor` and potentially drops duplicates in the last batch if on a distributed system. Should be used\n+ for gathering the inputs and targets for metric calculation.\n+\n+ Args:\n+ tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):\n+ The tensors for calculating metrics across all processes.\n+ dataloader (`torch.utils.data.DataLoader`):\n+ A dataloader prepared with `Accelerator.prepare`\n+ \"\"\"\n+ tensor = self.gather(tensor)\n+ if self.use_distributed:\n+ try:\n+ # Then see if we're on the last batch of our eval dataloader\n+ if self.gradient_state.end_of_dataloader:\n+ # Last batch needs to be truncated on distributed systems as it contains additional samples\n+ def _adjust_samples(tensor):\n+ return tensor[: dataloader.total_dataset_length - self.gradient_state.samples_seen]\n+\n+ return recursively_apply(_adjust_samples, tensor)\n+ else:\n+ # Not at the end of the dataloader, no need to adjust the tensors\n+ return tensor\n+ except:\n+ # Dataset had no length or raised an error\n+ return tensor\n+ return tensor\n+\n def reduce(self, tensor, reduction=\"sum\"):\n \"\"\"\n Reduce the values in *tensor* across all processes based on *reduction*.\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex fddfe8955..ba3338546 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -47,12 +47,18 @@ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):\n - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n number of processes\n+\n+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n \n @property\n def total_batch_size(self):\n return self._loader.total_batch_size\n \n+ @property\n+ def total_dataset_length(self):\n+ return self._loader.total_dataset_length\n+\n \n logger = 
get_logger(__name__)\n \n@@ -311,6 +317,8 @@ class DataLoaderShard(DataLoader):\n - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n number of processes\n+\n+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n \n def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwargs):\n@@ -324,11 +332,13 @@ def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n self.gradient_state._set_end_of_dataloader(False)\n+ self.gradient_state._set_samples_seen(0)\n dataloader_iter = super().__iter__()\n # We iterate one batch ahead to check when we are at the end\n try:\n current_batch = next(dataloader_iter)\n except StopIteration:\n+ self.gradient_state._iterate_samples_seen(find_batch_size(current_batch))\n yield\n while True:\n try:\n@@ -336,10 +346,12 @@ def __iter__(self):\n if self.device is not None:\n current_batch = send_to_device(current_batch, self.device)\n next_batch = next(dataloader_iter)\n+ self.gradient_state._iterate_samples_seen(find_batch_size(current_batch))\n yield current_batch\n current_batch = next_batch\n except StopIteration:\n self.gradient_state._set_end_of_dataloader(True)\n+ self.gradient_state._iterate_samples_seen(find_batch_size(current_batch))\n yield current_batch\n break\n \n@@ -351,6 +363,10 @@ def total_batch_size(self):\n else (self.batch_sampler.batch_size * self.batch_sampler.num_processes)\n )\n \n+ @property\n+ def total_dataset_length(self):\n+ return len(self.dataset)\n+\n \n class DataLoaderDispatcher(DataLoader):\n \"\"\"\n@@ -370,6 +386,8 @@ class DataLoaderDispatcher(DataLoader):\n - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n number of processes\n+\n+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n@@ -435,6 +453,7 @@ def _fetch_batches(self, iterator):\n \n def __iter__(self):\n self.gradient_state._set_end_of_dataloader(False)\n+ self.gradient_state._set_samples_seen(0)\n main_iterator = None\n if self.state.process_index == 0:\n # We only iterate through the DataLoader on process 0.\n@@ -466,12 +485,15 @@ def __iter__(self):\n \n data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)\n next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n+ batch = slice_tensors(batch, data_slice)\n if not self._stop_iteration:\n- yield slice_tensors(batch, data_slice)\n+ self.gradient_state._iterate_samples_seen(batch_size)\n+ yield batch\n batch, batch_info, skip = next_batch, next_batch_info, next_skip\n else:\n+ self.gradient_state._iterate_samples_seen(batch_size)\n self.gradient_state._set_end_of_dataloader(True)\n- yield slice_tensors(batch, data_slice)\n+ yield batch\n break\n \n def __len__(self):\n@@ -489,6 +511,10 @@ def total_batch_size(self):\n self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)\n )\n \n+ @property\n+ def total_dataset_length(self):\n+ return len(self.dataset)\n+\n \n def prepare_data_loader(\n dataloader: DataLoader,\ndiff --git a/src/accelerate/state.py 
b/src/accelerate/state.py\nindex 3c06968aa..6d3e7d3ff 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -255,10 +255,15 @@ def __init__(self):\n if not getattr(self, \"initialized\", False):\n self.sync_gradients = True\n self.end_of_dataloader = False\n+ self.samples_seen = 0\n self.initialized = True\n \n def __repr__(self):\n- return f\"Sync Gradients: {self.sync_gradients}\\n\" f\"At end of current dataloader: {self.end_of_dataloader}\\n\"\n+ return (\n+ f\"Sync Gradients: {self.sync_gradients}\\n\"\n+ f\"At end of current dataloader: {self.end_of_dataloader}\\n\"\n+ f\"Samples seen: {self.samples_seen}\"\n+ )\n \n def _set_sync_gradients(self, sync_gradients):\n \"Private function that sets whether gradients should be synchronized. Users should not have to call this.\"\n@@ -267,3 +272,11 @@ def _set_sync_gradients(self, sync_gradients):\n def _set_end_of_dataloader(self, end_of_dataloader):\n \"Private function that sets whether the end of the current dataloader has been reached. Users should not have to call this.\"\n self.end_of_dataloader = end_of_dataloader\n+\n+ def _set_samples_seen(self, samples_seen):\n+ \"Private function that sets the number of samples iterated over. Users should not have to call this.\"\n+ self.samples_seen = samples_seen\n+\n+ def _iterate_samples_seen(self, iteration: int = 1):\n+ \"Private function that iterates the number of samples seen by an iteration. Users should not have to call this.\"\n+ self._set_samples_seen(self.samples_seen + iteration)\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex 2d8527be2..d65099602 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -15,4 +15,4 @@\n from .training import RegressionDataset, RegressionModel\n \n \n-from .scripts import test_script, test_sync # isort:skip\n+from .scripts import test_metrics, test_script, test_sync # isort:skip\ndiff --git a/src/accelerate/test_utils/scripts/test_metrics.py b/src/accelerate/test_utils/scripts/test_metrics.py\nnew file mode 100644\nindex 000000000..442053213\n--- /dev/null\n+++ b/src/accelerate/test_utils/scripts/test_metrics.py\n@@ -0,0 +1,98 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from copy import deepcopy\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+import evaluate\n+from accelerate import Accelerator\n+from accelerate.test_utils import RegressionDataset, RegressionModel\n+from accelerate.utils import set_seed\n+\n+\n+def get_setup(accelerator):\n+ \"Returns everything needed to perform basic training\"\n+ set_seed(42)\n+ model = RegressionModel()\n+ ddp_model = deepcopy(model)\n+ dset = RegressionDataset(length=80)\n+ dataloader = DataLoader(dset, batch_size=16)\n+ model.to(accelerator.device)\n+ ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)\n+ return model, ddp_model, dataloader\n+\n+\n+def accuracy(predictions, labels) -> float:\n+ \"\"\"\n+ Get the accuracy with respect to the most likely label\n+ \"\"\"\n+ return (predictions == labels).float().mean()\n+\n+\n+def test_torch_metrics():\n+ accelerator = Accelerator()\n+ model, ddp_model, dataloader = get_setup(accelerator)\n+ for batch in dataloader:\n+ ddp_input, ddp_target = batch.values()\n+ # First do single process\n+ input, target = accelerator.gather((ddp_input, ddp_target))\n+ input, target = input.to(accelerator.device), target.to(accelerator.device)\n+ with torch.no_grad():\n+ logits = model(input)\n+ accuracy_single = accuracy(logits.argmax(dim=-1), target)\n+ # Then do multiprocess\n+ with torch.no_grad():\n+ logits = ddp_model(ddp_input)\n+ logits, target = accelerator.gather_for_metrics((logits, ddp_target), dataloader)\n+ accuracy_multi = accuracy(logits.argmax(dim=-1), target)\n+ assert torch.allclose(accuracy_single, accuracy_multi), \"The two accuracies were not the same!\"\n+\n+\n+def test_evaluate_metrics():\n+ metric = evaluate.load(\"accuracy\")\n+ accelerator = Accelerator()\n+ model, ddp_model, dataloader = get_setup(accelerator)\n+ for batch in dataloader:\n+ ddp_input, ddp_target = batch.values()\n+ # First do single process\n+ input, target = accelerator.gather((ddp_input, ddp_target))\n+ input, target = input.to(accelerator.device), target.to(accelerator.device)\n+ with torch.no_grad():\n+ logits = model(input)\n+ accuracy_single = metric.compute(logits, target)\n+ # Then do multiprocess\n+ with torch.no_grad():\n+ logits = ddp_model(ddp_input)\n+ logits, target = accelerator.gather_for_metrics((logits, ddp_target), dataloader)\n+ accuracy_multi = metric.compute(logits, target)\n+ assert torch.allclose(accuracy_single, accuracy_multi), \"The two accuracies were not the same!\"\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ state = accelerator.state\n+ if state.local_process_index == 0:\n+ print(\"**Test torch metrics**\")\n+ test_torch_metrics()\n+\n+\n+def _mp_fn(index):\n+ # For xla_spawn (TPUs)\n+ main()\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/tests/test_metrics.py b/tests/test_metrics.py\nnew file mode 100644\nindex 000000000..c744a2fdb\n--- /dev/null\n+++ b/tests/test_metrics.py\n@@ -0,0 +1,55 @@\n+# Copyright 2021 The 
HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import inspect\n+import os\n+import unittest\n+\n+import torch\n+\n+import accelerate\n+from accelerate import debug_launcher\n+from accelerate.test_utils import (\n+ execute_subprocess_async,\n+ require_cpu,\n+ require_multi_gpu,\n+ require_single_gpu,\n+ test_metrics,\n+)\n+from accelerate.utils import get_launch_prefix, patch_environment\n+\n+\n+class MetricTester(unittest.TestCase):\n+ def setUp(self):\n+ mod_file = inspect.getfile(accelerate.test_utils)\n+ self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_metrics.py\"])\n+\n+ @require_cpu\n+ def test_metric_cpu_noop(self):\n+ debug_launcher(test_metrics.main, num_processes=1)\n+\n+ @require_cpu\n+ def test_metric_cpu_multi(self):\n+ debug_launcher(test_metrics.main)\n+\n+ @require_single_gpu\n+ def test_metric_gpu(self):\n+ test_metrics.main()\n+\n+ @require_multi_gpu\n+ def test_metric_gpu_multi(self):\n+ print(f\"Found {torch.cuda.device_count()} devices.\")\n+ cmd = get_launch_prefix() + [f\"--nproc_per_node={torch.cuda.device_count()}\", self.test_file_path]\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd, env=os.environ.copy())\n", "code_comments": [ { "body": "Maybe mention it that if the user doesn't want that behavior for some reason, they can just use `gather`?", "diff_hunk": "@@ -141,11 +140,14 @@ As for the training dataloader, passing your validation dataloader through\n Any instruction using your training dataloader length (for instance if you need the number of total training steps\n to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].\n \n+As some data at the end of the dataset may be duplicated so the batch can divide equally to all workers, metrics should be \n+calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "is this functionality in 0.11.0?", "from_author": false }, { "body": "It is not, we suggest not using it for now and doing the check manually as shown in the metric example script as some bugs were discovered: https://github.com/huggingface/accelerate/issues/575", "from_author": true }, { "body": "Cool, thank you, excited for this change when it happens", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/540", "pr_id": 1002401469 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 4014e3972..85b1f3237 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -65,7 +65,9 @@ def init_empty_weights(include_buffers: bool = False):\n def register_empty_parameter(module, name, param):\n old_register_parameter(module, name, param)\n if param is not None:\n- 
module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+ param_cls = type(module._parameters[name])\n+ kwargs = module._parameters[name].__dict__\n+ module._parameters[name] = param_cls(module._parameters[name].to(torch.device(\"meta\")), **kwargs)\n \n def register_empty_buffer(module, name, buffer):\n old_register_buffer(module, name, buffer)\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 62c4f9761..2dc85ef70 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -124,11 +124,16 @@ def set_module_tensor_to_device(\n new_value = value.to(device)\n else:\n new_value = torch.tensor(value, device=device)\n- if is_buffer:\n- module._buffers[tensor_name] = new_value\n- else:\n- new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n- module._parameters[tensor_name] = new_value\n+\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ elif module._parameters[tensor_name].device.type != \"cuda\":\n+ param_cls = type(module._parameters[tensor_name])\n+ kwargs = module._parameters[tensor_name].__dict__\n+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)\n+ module._parameters[tensor_name] = new_value\n+ else:\n+ module._parameters[tensor_name] = module._parameters[tensor_name].to(device)\n \n \n def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n", "code_comments": [ { "body": "This should stay at the previous level of indentation. To make sure there is absolutely zero breaking change, how about doing an `else` with just `module._parameters[tensor_name].to(device)` ?", "diff_hunk": "@@ -124,11 +124,14 @@ def set_module_tensor_to_device(\n new_value = value.to(device)\n else:\n new_value = torch.tensor(value, device=device)\n- if is_buffer:\n- module._buffers[tensor_name] = new_value\n- else:\n- new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n- module._parameters[tensor_name] = new_value\n+\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ elif module._parameters[tensor_name].device.type != \"cuda\":\n+ param_cls = type(module._parameters[tensor_name])\n+ kwargs = module._parameters[tensor_name].__dict__\n+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)\n+ module._parameters[tensor_name] = new_value", "from_author": false }, { "body": "Just added it and seems to work fine! πŸš€ \r\nCan we wait a bit before merging the PR?", "diff_hunk": "@@ -124,11 +124,14 @@ def set_module_tensor_to_device(\n new_value = value.to(device)\n else:\n new_value = torch.tensor(value, device=device)\n- if is_buffer:\n- module._buffers[tensor_name] = new_value\n- else:\n- new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n- module._parameters[tensor_name] = new_value\n+\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ elif module._parameters[tensor_name].device.type != \"cuda\":\n+ param_cls = type(module._parameters[tensor_name])\n+ kwargs = module._parameters[tensor_name].__dict__\n+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)\n+ module._parameters[tensor_name] = new_value", "from_author": true }, { "body": "Sure thing, let me know when it's good for you. 
Also don't forget to run `make style` on your branch.", "diff_hunk": "@@ -124,11 +124,14 @@ def set_module_tensor_to_device(\n new_value = value.to(device)\n else:\n new_value = torch.tensor(value, device=device)\n- if is_buffer:\n- module._buffers[tensor_name] = new_value\n- else:\n- new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n- module._parameters[tensor_name] = new_value\n+\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ elif module._parameters[tensor_name].device.type != \"cuda\":\n+ param_cls = type(module._parameters[tensor_name])\n+ kwargs = module._parameters[tensor_name].__dict__\n+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)\n+ module._parameters[tensor_name] = new_value", "from_author": false }, { "body": "Great thanks a lot!!", "diff_hunk": "@@ -124,11 +124,14 @@ def set_module_tensor_to_device(\n new_value = value.to(device)\n else:\n new_value = torch.tensor(value, device=device)\n- if is_buffer:\n- module._buffers[tensor_name] = new_value\n- else:\n- new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n- module._parameters[tensor_name] = new_value\n+\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ elif module._parameters[tensor_name].device.type != \"cuda\":\n+ param_cls = type(module._parameters[tensor_name])\n+ kwargs = module._parameters[tensor_name].__dict__\n+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)\n+ module._parameters[tensor_name] = new_value", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/539", "pr_id": 1001997559 }, { "diff": "diff --git a/docs/source/big_modeling.mdx b/docs/source/big_modeling.mdx\nindex 051d1bda7..006c8bc4e 100644\n--- a/docs/source/big_modeling.mdx\n+++ b/docs/source/big_modeling.mdx\n@@ -205,7 +205,7 @@ This only supports inference of your model, not training. Most of the computatio\n \n ## Designing a device map\n \n-You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map` to one of the supported options (`\"auto\"`, `\"balanced\"`, `\"balanced_low_0\"`, `\"sequential\"`) or create one yourself, if you want more control over where each layer should go.\n \n <Tip>\n \n@@ -213,6 +213,19 @@ You can derive all sizes of the model (and thus compute a `device_map`) on a mod\n \n </Tip>\n \n+All the options will produce the same result when you don't have enough GPU memory to accomodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM). \n+\n+When you have more GPU memory available than the model size, here the difference between each option:\n+- `\"auto\"` and `\"balanced\"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1.\n+- `\"balanced_low_0\"` evenly splits the model on all GPUs except the first one, and only puts on GPU 0 what does not fit on the others. 
This option is great when you need to use GPU 0 for some processing of the outputs, like when using the `generate` function for Transformers models\n+- `\"sequential\"` will fit what it can on GPU 0, then move on GPU 1 and so forth (so won't use the last GPUs if it doesn't need to).\n+\n+<Tip>\n+\n+The options `\"auto\"` and `\"balanced\"` produce the same results for now, but the behavior of `\"auto\"` might change in the future if we find a strategy that makes more sense, while `\"balanced\"` will stay stable.\n+\n+</Tip>\n+\n First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `\"cpu\"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `\"10GiB\"` or `\"10GB\"`.\n \n Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM for the model weights:\ndiff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 907dfdac4..311111ad4 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -24,6 +24,7 @@\n OffloadedWeightsLoader,\n check_device_map,\n extract_submodules_state_dict,\n+ get_balanced_memory,\n infer_auto_device_map,\n load_checkpoint_in_model,\n offload_state_dict,\n@@ -278,7 +279,8 @@ def load_checkpoint_and_dispatch(\n A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer\n name, once a given module name is inside, every submodule of it will be sent to the same device.\n \n- To have Accelerate compute the most optimized `device_map` automatically, set `device_map=\"auto\"`.\n+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map=\"auto\"`. For more\n+ information about each option see [here](big_modeling#designing-a-device-map).\n max_memory (`Dict`, *optional*):\n A dictionary device identifier to maximum memory. 
Will default to the maximum memory available for each GPU\n and the available CPU RAM if unset.\n@@ -302,7 +304,20 @@ def load_checkpoint_and_dispatch(\n called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n- if device_map == \"auto\":\n+ if isinstance(device_map, str) and device_map not in [\"auto\", \"balanced\", \"balanced_low_0\", \"sequential\"]:\n+ raise ValueError(\n+ \"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or \"\n+ \"'sequential'.\"\n+ )\n+ if device_map != \"sequential\":\n+ max_memory = get_balanced_memory(\n+ model,\n+ max_memory=max_memory,\n+ no_split_module_classes=no_split_module_classes,\n+ dtype=dtype,\n+ low_zero=(device_map == \"balanced_low_0\"),\n+ )\n+ if isinstance(device_map, str):\n device_map = infer_auto_device_map(\n model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype\n )\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 350efc899..f6b83d2f9 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -37,6 +37,7 @@\n convert_file_size_to_int,\n dtype_byte_size,\n find_tied_parameters,\n+ get_balanced_memory,\n get_max_layer_size,\n get_max_memory,\n infer_auto_device_map,\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 62c4f9761..e412ce0a0 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -326,6 +326,101 @@ def load_offloaded_weights(model, index, offload_folder):\n set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\n \n \n+def get_balanced_memory(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+ dtype: Optional[Union[str, torch.dtype]] = None,\n+ low_zero: bool = False,\n+):\n+ \"\"\"\n+ Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. 
Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ dtype (`str` or `torch.dtype`, *optional*):\n+ If provided, the weights will be converted to that type when loaded.\n+ low_zero (`bool`, *optional*):\n+ Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the\n+ Transformers generate function).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+\n+ if not torch.cuda.is_available():\n+ return max_memory\n+\n+ num_devices = len([d for d in max_memory if torch.device(d).type == \"cuda\"])\n+ module_sizes = compute_module_sizes(model, dtype=dtype)\n+ per_gpu = module_sizes[\"\"] // (num_devices - 1 if low_zero else num_devices)\n+\n+ # We can't just set the memory to model_size // num_devices as it will end being too small: each GPU will get\n+ # slightly less layers and some layers will end up offload at the end. So this function computes a buffer size to\n+ # add which is the biggest of:\n+ # - the size of no split block (if applicable)\n+ # - the mean of the layer sizes\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ # Identify the size of the no_split_block modules\n+ if len(no_split_module_classes) > 0:\n+ no_split_children = {}\n+ for name, size in module_sizes.items():\n+ if name == \"\":\n+ continue\n+ submodule = model\n+ for submodule_name in name.split(\".\"):\n+ submodule = getattr(submodule, submodule_name)\n+ class_name = submodule.__class__.__name__\n+ if class_name in no_split_module_classes and class_name not in no_split_children:\n+ no_split_children[class_name] = size\n+\n+ if set(no_split_children.keys()) == set(no_split_module_classes):\n+ break\n+ buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0\n+ else:\n+ buffer = 0\n+\n+ # Compute mean of final modules. 
In the first dict of module sizes, leaves are the parameters\n+ leaves = [n for n in module_sizes if len([p for p in module_sizes if p.startswith(n) and len(p) > len(n)]) == 0]\n+ module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves}\n+ # Once removed, leaves are the final modules.\n+ leaves = [n for n in module_sizes if len([p for p in module_sizes if p.startswith(n) and len(p) > len(n)]) == 0]\n+ mean_leaves = int(sum([module_sizes[n] for n in leaves]) / len(leaves))\n+ buffer = int(1.25 * max(buffer, mean_leaves))\n+ if low_zero:\n+ per_gpu += buffer\n+ gpu_zero = 0\n+ else:\n+ gpu_zero = per_gpu\n+ per_gpu += buffer\n+\n+ max_memory = get_max_memory(max_memory)\n+ for i in range(num_devices):\n+ # We still leave slightly more space on GPU 0 and only apply the buffer on the other devices.\n+ max_memory[i] = min(gpu_zero if i == 0 else per_gpu, max_memory[i])\n+\n+ if low_zero:\n+ min_zero = max(0, module_sizes[\"\"] - sum([max_memory[i] for i in range(1, num_devices)]))\n+ max_memory[0] = min(min_zero, max_memory[0])\n+\n+ return max_memory\n+\n+\n def infer_auto_device_map(\n model: nn.Module,\n max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\nindex 15e164218..799170257 100644\n--- a/tests/test_modeling_utils.py\n+++ b/tests/test_modeling_utils.py\n@@ -26,6 +26,7 @@\n clean_device_map,\n compute_module_sizes,\n find_tied_parameters,\n+ get_balanced_memory,\n infer_auto_device_map,\n load_checkpoint_in_model,\n named_module_tensors,\n@@ -358,3 +359,13 @@ def test_infer_auto_device_map(self):\n device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500})\n expected = {\"0\": 0, \"2.linear2\": 0, \"1\": 1, \"2.linear1\": 1, \"2.batchnorm\": 1}\n self.assertDictEqual(device_map, expected)\n+\n+ @require_cuda\n+ def test_get_balanced_memory(self):\n+ model = ModelForTest()\n+ # model has size 236: linear1 64, batchnorm 72, linear2 100\n+ max_memory = get_balanced_memory(model, max_memory={0: 200, 1: 200})\n+ self.assertDictEqual({0: 118, 1: 200}, max_memory)\n+\n+ max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 300})\n+ self.assertDictEqual({0: 118, 1: 215}, max_memory)\n", "code_comments": [ { "body": "Docstring format nit", "diff_hunk": "@@ -326,6 +326,95 @@ def load_offloaded_weights(model, index, offload_folder):\n set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\n \n \n+def get_balanced_memory(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+ dtype: Optional[Union[str, torch.dtype]] = None,\n+ low_zero: bool = False,\n+):\n+ \"\"\"\n+ Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. 
As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.", "from_author": false }, { "body": "I don't see any nit ;-) ", "diff_hunk": "@@ -326,6 +326,95 @@ def load_offloaded_weights(model, index, offload_folder):\n set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\n \n \n+def get_balanced_memory(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+ dtype: Optional[Union[str, torch.dtype]] = None,\n+ low_zero: bool = False,\n+):\n+ \"\"\"\n+ Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.", "from_author": true }, { "body": "If it resonates I would rename it to something more self-documenting? e.g. `minimize_gpu0_memory` or `minimize_first_gpu_memory` ", "diff_hunk": "@@ -326,6 +326,95 @@ def load_offloaded_weights(model, index, offload_folder):\n set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\n \n \n+def get_balanced_memory(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+ dtype: Optional[Union[str, torch.dtype]] = None,\n+ low_zero: bool = False,", "from_author": false }, { "body": "```suggestion\r\nAll the options will produce the same result when you don't have enough GPU memory to accomodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM). \r\n\r\nWhen you have more GPU memory available than the model size, here the difference between each option:\r\n```\r\nLet's add a break here", "diff_hunk": "@@ -205,14 +205,25 @@ This only supports inference of your model, not training. Most of the computatio\n \n ## Designing a device map\n \n-You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map` to one of the supported options (`\"auto\"`, `\"balanced\"`, `\"balanced_low_0\"`, `\"sequential\"`) or create one yourself, if you want more control over where each layer should go.\n \n <Tip>\n \n You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n \n </Tip>\n \n+All the options will produce the same result when you don't have enough GPU memory to accomodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM). 
When you have more GPU memory available than the model size, here the difference between each option:", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Yes I will update it once the other PR is merged :-) ", "from_author": true }, { "body": "> This leads to equal allocation across all GPUs. Ideally it should be reworked to leave the gpu0 as close to empty as possible. So probably first trying to spread the weights across all but the first gpu, while leaving enough memory for activation calculation and temps. And only then assign any remaining weights to the first gpu.\r\n\r\nThat is exactly what the option `\"balanced_low_0\"` will do.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/534", "pr_id": 1000906620 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 09330eb63..806f0094d 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -16,6 +16,7 @@ jobs:\n test_core,\n test_big_modeling,\n test_deepspeed,\n+ test_fsdp,\n test_example_differences,\n test_checkpoint_step,\n test_checkpoint_epoch,\ndiff --git a/Makefile b/Makefile\nindex 4104f1bba..9cae9d74d 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -31,11 +31,15 @@ test_big_modeling:\n \tpython -m pytest -s -v ./tests/test_big_modeling.py\n \n test_core:\n-\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py\n+\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \\\n+\t--ignore=./tests/fsdp\n \n test_deepspeed:\n \tpython -m pytest -s -v ./tests/deepspeed\n \n+test_fsdp:\n+\tpython -m pytest -s -v ./tests/fsdp\n+\n test_examples:\n \tpython -m pytest -s -v ./tests/test_examples.py\n \ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 35afd6c5d..c961f5be0 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -31,6 +31,7 @@\n from .state import AcceleratorState, GradientState\n from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers\n from .utils import (\n+ MODEL_NAME,\n DeepSpeedPlugin,\n DistributedDataParallelKwargs,\n DistributedType,\n@@ -520,7 +521,8 @@ def prepare(self, *args):\n if model_count > 1 and optimizer_present:\n raise ValueError(\n \"For FSDP to work with multiple models (>1), \"\n- \"prepare must be called for all the models before optimizers are created\"\n+ \"prepare must be called for all the models before optimizers are created. 
\"\n+ \"Then pass the optimizers to the prepare call in the same order as corresponding models.\"\n )\n elif model_count == 1 and optimizer_present:\n logger.warn(\n@@ -596,6 +598,7 @@ def prepare_model(self, model):\n )\n if not fsdp_plugin.cpu_offload.offload_params:\n model.to(self.device)\n+ self._models[-1] = model\n elif self.distributed_type == DistributedType.MULTI_CPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n@@ -1042,9 +1045,44 @@ def save_state(self, output_dir: str):\n output_dir = os.path.expanduser(output_dir)\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving current state to {output_dir}\")\n- weights = [self.get_state_dict(m, unwrap=False) for m in self._models]\n+\n+ # Save the models taking care of FSDP and DeepSpeed nuances\n+ weights = []\n+ for i, model in enumerate(self._models):\n+ if self.distributed_type == DistributedType.FSDP:\n+ logger.info(\"Saving FSDP model\")\n+ self.state.fsdp_plugin.save_model(self, model, output_dir, i)\n+ logger.info(f\"FSDP Model saved to output dir {output_dir}\")\n+ elif self.distributed_type == DistributedType.DEEPSPEED:\n+ logger.info(\"Saving DeepSpeed Model and Optimizer\")\n+ ckpt_id = f\"{MODEL_NAME}\" if i == 0 else f\"{MODEL_NAME}_{i}\"\n+ model.save_checkpoint(output_dir, ckpt_id)\n+ logger.info(f\"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}\")\n+ else:\n+ weights.append(self.get_state_dict(model, unwrap=False))\n+\n+ # Save the optimizers taking care of FSDP and DeepSpeed nuances\n+ optimizers = []\n+ if self.distributed_type == DistributedType.FSDP:\n+ for opt in self._optimizers:\n+ logger.info(\"Saving FSDP Optimizer\")\n+ self.state.fsdp_plugin.save_optimizer(self, opt, self._models[i], output_dir, i)\n+ logger.info(f\"FSDP Optimizer saved to output dir {output_dir}\")\n+ elif self.distributed_type != DistributedType.DEEPSPEED:\n+ optimizers = self._optimizers\n+\n+ # Save the lr schedulers taking care of DeepSpeed nuances\n+ schedulers = []\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ for i, scheduler in enumerate(self._schedulers):\n+ if isinstance(scheduler, DeepSpeedSchedulerWrapper):\n+ continue\n+ schedulers.append(scheduler)\n+ else:\n+ schedulers = self._schedulers\n+\n save_location = save_accelerator_state(\n- output_dir, weights, self._optimizers, self._schedulers, self.state.process_index, self.scaler\n+ output_dir, weights, optimizers, schedulers, self.state.process_index, self.scaler\n )\n for i, obj in enumerate(self._custom_objects):\n save_custom_state(obj, output_dir, i)\n@@ -1063,9 +1101,43 @@ def load_state(self, input_dir: str):\n if not os.path.isdir(input_dir):\n raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n logger.info(f\"Loading states from {input_dir}\")\n- load_accelerator_state(\n- input_dir, self._models, self._optimizers, self._schedulers, self.state.process_index, self.scaler\n- )\n+\n+ # Load the models taking care of FSDP and DeepSpeed nuances\n+ models = []\n+ for i, model in enumerate(self._models):\n+ if self.distributed_type == DistributedType.FSDP:\n+ logger.info(\"Loading FSDP model\")\n+ self.state.fsdp_plugin.load_model(self, model, input_dir, i)\n+ logger.info(f\"FSDP Model loaded from input dir {input_dir}\")\n+ elif self.distributed_type == DistributedType.DEEPSPEED:\n+ logger.info(\"Loading DeepSpeed Model and Optimizer\")\n+ ckpt_id = f\"{MODEL_NAME}\" if i == 0 else 
f\"{MODEL_NAME}_{i}\"\n+ model.load_checkpoint(input_dir, ckpt_id)\n+ logger.info(f\"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}\")\n+ else:\n+ models.append(model)\n+\n+ # Load the optimizers taking care of FSDP and DeepSpeed nuances\n+ optimizers = []\n+ if self.distributed_type == DistributedType.FSDP:\n+ for i, opt in enumerate(self._optimizers):\n+ logger.info(\"Loading FSDP Optimizer\")\n+ self.state.fsdp_plugin.load_optimizer(self, opt, self._models[i], input_dir, i)\n+ logger.info(f\"FSDP Optimizer loaded from input dir {input_dir}\")\n+ elif self.distributed_type != DistributedType.DEEPSPEED:\n+ optimizers = self._optimizers\n+\n+ # Load the lr schedulers taking care of DeepSpeed nuances\n+ schedulers = []\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ for i, scheduler in enumerate(self._schedulers):\n+ if isinstance(scheduler, DeepSpeedSchedulerWrapper):\n+ continue\n+ schedulers.append(scheduler)\n+ else:\n+ schedulers = self._schedulers\n+\n+ load_accelerator_state(input_dir, models, optimizers, schedulers, self.state.process_index, self.scaler)\n custom_checkpoints = [f for f in os.listdir(input_dir) if \"custom_checkpoint\" in f]\n if len(custom_checkpoints) != len(self._custom_objects):\n err = \"Warning! Number of found checkpoints does not match the number of registered objects:\"\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex fd345fa1c..ba677e3eb 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -20,6 +20,7 @@\n FSDP_AUTO_WRAP_POLICY,\n FSDP_BACKWARD_PREFETCH,\n FSDP_SHARDING_STRATEGY,\n+ FSDP_STATE_DICT_TYPE,\n )\n from .config_args import ClusterConfig\n from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n@@ -210,12 +211,12 @@ def get_cluster_input():\n for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\n- fsdp_config[\"sharding_strategy\"] = _ask_field(\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(\n sharding_strategy_query,\n lambda x: int(x),\n default=1,\n )\n- fsdp_config[\"offload_params\"] = _ask_field(\n+ fsdp_config[\"fsdp_offload_params\"] = _ask_field(\n \"Do you want to offload parameters and gradients to CPU? [yes/NO]: \",\n _convert_yes_no_to_bool,\n default=False,\n@@ -228,15 +229,15 @@ def get_cluster_input():\n fsdp_config[\"fsdp_auto_wrap_policy\"] = _ask_field(\n fsdp_wrap_query,\n lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],\n- default=FSDP_AUTO_WRAP_POLICY[0],\n+ default=\"TRANSFORMER_BASED_WRAP\",\n )\n if fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[0]:\n- fsdp_config[\"transformer_layer_cls_to_wrap\"] = _ask_field(\n+ fsdp_config[\"fsdp_transformer_layer_cls_to_wrap\"] = _ask_field(\n \"What is the transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...? : \",\n lambda x: str(x),\n )\n elif fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[1]:\n- fsdp_config[\"min_num_params\"] = _ask_field(\n+ fsdp_config[\"fsdp_min_num_params\"] = _ask_field(\n \"What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? 
[1e8]: \",\n lambda x: int(x),\n default=1e8,\n@@ -248,7 +249,16 @@ def get_cluster_input():\n fsdp_config[\"fsdp_backward_prefetch_policy\"] = _ask_field(\n fsdp_backward_prefetch_query,\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n- default=FSDP_BACKWARD_PREFETCH[0],\n+ default=\"BACKWARD_PRE\",\n+ )\n+ fsdp_state_dict_type_query = \"What should be your FSDP's state dict type (\"\n+ for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n+ fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\n+ fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? [0]: \"\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\n+ fsdp_state_dict_type_query,\n+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],\n+ default=\"FULL_STATE_DICT\",\n )\n \n if distributed_type == DistributedType.TPU:\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex e6a23e46d..514e77d05 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -148,19 +148,19 @@ def launch_command_parser(subparsers=None):\n help=\"Whether to use fsdp.\",\n )\n parser.add_argument(\n- \"--offload_params\",\n+ \"--fsdp_offload_params\",\n default=\"false\",\n type=str,\n help=\"Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).\",\n )\n parser.add_argument(\n- \"--min_num_params\",\n+ \"--fsdp_min_num_params\",\n type=int,\n default=1e8,\n help=\"FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).\",\n )\n parser.add_argument(\n- \"--sharding_strategy\",\n+ \"--fsdp_sharding_strategy\",\n type=int,\n default=1,\n help=\"FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).\",\n@@ -172,7 +172,7 @@ def launch_command_parser(subparsers=None):\n help=\"FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).\",\n )\n parser.add_argument(\n- \"--transformer_layer_cls_to_wrap\",\n+ \"--fsdp_transformer_layer_cls_to_wrap\",\n default=None,\n type=str,\n help=\"Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... \"\n@@ -184,6 +184,36 @@ def launch_command_parser(subparsers=None):\n type=str,\n help=\"FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).\",\n )\n+ parser.add_argument(\n+ \"--fsdp_state_dict_type\",\n+ default=None,\n+ type=str,\n+ help=\"FSDP's state dict type. (useful only when `use_fsdp` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--offload_params\",\n+ default=None,\n+ type=str,\n+ help=\"This argument is deprecated. Use `fsdp_offload_params` instead.\",\n+ )\n+ parser.add_argument(\n+ \"--min_num_params\",\n+ type=int,\n+ default=None,\n+ help=\"This argument is deprecated. Use `fsdp_min_num_params` instead.\",\n+ )\n+ parser.add_argument(\n+ \"--sharding_strategy\",\n+ type=int,\n+ default=None,\n+ help=\"This argument is deprecated. Use `fsdp_sharding_strategy` instead.\",\n+ )\n+ parser.add_argument(\n+ \"--transformer_layer_cls_to_wrap\",\n+ default=None,\n+ type=str,\n+ help=\"This argument is deprecated. 
Use `fsdp_transformer_layer_cls_to_wrap` instead.\",\n+ )\n parser.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n@@ -360,13 +390,51 @@ def multi_gpu_launcher(args):\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n if args.use_fsdp:\n+ if args.sharding_strategy is not None:\n+ warnings.warn(\n+ \"`sharding_strategy` is deprecated and will be removed in version 0.13.0 of πŸ€— Accelerate. Use\"\n+ \" `fsdp_sharding_strategy` instead\",\n+ FutureWarning,\n+ )\n+ args.fsdp_sharding_strategy = args.sharding_strategy\n+\n+ if args.offload_params is not None:\n+ warnings.warn(\n+ \"`offload_params` is deprecated and will be removed in version 0.13.0 of πŸ€— Accelerate. Use\"\n+ \" `fsdp_offload_params` instead\",\n+ FutureWarning,\n+ )\n+ args.fsdp_offload_params = args.offload_params\n+\n+ if args.min_num_params is not None:\n+ warnings.warn(\n+ \"`min_num_params` is deprecated and will be removed in version 0.13.0 of πŸ€— Accelerate. Use\"\n+ \" `fsdp_min_num_params` instead\",\n+ FutureWarning,\n+ )\n+ args.fsdp_min_num_params = args.min_num_params\n+\n+ if args.transformer_layer_cls_to_wrap is not None:\n+ warnings.warn(\n+ \"`transformer_layer_cls_to_wrap` is deprecated and will be removed in version 0.13.0 of πŸ€— Accelerate. Use\"\n+ \" `fsdp_transformer_layer_cls_to_wrap` instead\",\n+ FutureWarning,\n+ )\n+ args.fsdp_transformer_layer_cls_to_wrap = args.transformer_layer_cls_to_wrap\n+\n current_env[\"USE_FSDP\"] = \"true\"\n- current_env[\"FSDP_AUTO_WRAP_POLICY\"] = str(args.fsdp_auto_wrap_policy)\n- current_env[\"FSDP_TRANSFORMER_CLS_TO_WRAP\"] = str(args.transformer_layer_cls_to_wrap)\n- current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.offload_params).lower()\n- current_env[\"FSDP_MIN_NUM_PARAMS\"] = str(args.min_num_params)\n- current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.sharding_strategy)\n- current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\n+ current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.fsdp_sharding_strategy)\n+ current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.fsdp_offload_params).lower()\n+ current_env[\"FSDP_MIN_NUM_PARAMS\"] = str(args.fsdp_min_num_params)\n+ if args.fsdp_auto_wrap_policy is not None:\n+ current_env[\"FSDP_AUTO_WRAP_POLICY\"] = str(args.fsdp_auto_wrap_policy)\n+ if args.fsdp_transformer_layer_cls_to_wrap is not None:\n+ current_env[\"FSDP_TRANSFORMER_CLS_TO_WRAP\"] = str(args.fsdp_transformer_layer_cls_to_wrap)\n+ if args.fsdp_backward_prefetch_policy is not None:\n+ current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\n+ if args.fsdp_state_dict_type is not None:\n+ current_env[\"FSDP_STATE_DICT_TYPE\"] = str(args.fsdp_state_dict_type)\n+\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n@@ -682,7 +750,10 @@ def launch_command(args):\n if getattr(args, k) is None:\n setattr(args, k, defaults.deepspeed_config[k])\n for k in defaults.fsdp_config:\n- setattr(args, k, defaults.fsdp_config[k])\n+ arg_to_set = k\n+ if \"fsdp\" not in arg_to_set:\n+ arg_to_set = \"fsdp_\" + arg_to_set\n+ setattr(args, arg_to_set, defaults.fsdp_config[k])\n continue\n \n # Those args are handled separately\ndiff --git a/src/accelerate/test_utils/scripts/test_checkpointing.py b/src/accelerate/test_utils/scripts/test_checkpointing.py\nnew file mode 100644\nindex 000000000..cde602dfa\n--- /dev/null\n+++ 
b/src/accelerate/test_utils/scripts/test_checkpointing.py\n@@ -0,0 +1,269 @@\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import json\n+import os\n+\n+import torch\n+from torch.optim import AdamW\n+from torch.utils.data import DataLoader\n+\n+import evaluate\n+from accelerate import Accelerator, DistributedType\n+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler\n+from datasets import load_dataset\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = \"bert-base-cased\"):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ model_name (`str`, *optional*):\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(model_name)\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function, batched=True, remove_columns=[\"idx\", \"sentence1\", \"sentence2\"], load_from_cache_file=False\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def evaluation_loop(accelerator, model, eval_dataloader, metric):\n+ model.eval()\n+ samples_seen = 0\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ # It is slightly faster 
to call this once, than multiple times\n+ predictions, references = accelerator.gather(\n+ (predictions, batch[\"labels\"])\n+ ) # If we are in a multiprocess environment, the last batch has duplicates\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ samples_seen += references.shape[0]\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ return eval_metric[\"accuracy\"]\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+ model_name = args.model_name_or_path\n+\n+ set_seed(seed)\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)\n+\n+ # Instantiate optimizer\n+ optimizer_cls = (\n+ AdamW\n+ if accelerator.state.deepspeed_plugin is None\n+ or \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ else DummyOptim\n+ )\n+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)\n+\n+ if accelerator.state.deepspeed_plugin is not None:\n+ gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[\n+ \"gradient_accumulation_steps\"\n+ ]\n+ else:\n+ gradient_accumulation_steps = 1\n+ max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps\n+\n+ # Instantiate scheduler\n+ if (\n+ accelerator.state.deepspeed_plugin is None\n+ or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ ):\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=0,\n+ num_training_steps=max_training_steps,\n+ )\n+ else:\n+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # We need to keep track of how many total steps we have iterated over\n+ overall_step = 0\n+ # We also need to keep track of the stating epoch so files are named properly\n+ starting_epoch = 0\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n+ ending_epoch = num_epochs\n+\n+ if args.partial_train_epoch is not None:\n+ ending_epoch = args.partial_train_epoch\n+\n+ if args.resume_from_checkpoint:\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ epoch_string = args.resume_from_checkpoint.split(\"epoch_\")[1]\n+ state_epoch_num = \"\"\n+ for char in epoch_string:\n+ if char.isdigit():\n+ state_epoch_num += char\n+ else:\n+ break\n+ starting_epoch = int(state_epoch_num) + 1\n+ accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)\n+ accelerator.print(\"resumed checkpoint performance:\", accuracy)\n+ accelerator.print(\"resumed checkpoint's 
scheduler's lr:\", lr_scheduler.get_lr()[0])\n+ accelerator.print(\"resumed optimizers's lr:\", optimizer.param_groups[0][\"lr\"])\n+ with open(os.path.join(args.output_dir, f\"state_{starting_epoch-1}.json\"), \"r\") as f:\n+ resumed_state = json.load(f)\n+ assert resumed_state[\"accuracy\"] == accuracy, \"Accuracy mismatch, loading from checkpoint failed\"\n+ assert (\n+ resumed_state[\"lr\"] == lr_scheduler.get_lr()[0]\n+ ), \"Scheduler learning rate mismatch, loading from checkpoint failed\"\n+ assert (\n+ resumed_state[\"optimizer_lr\"] == optimizer.param_groups[0][\"lr\"]\n+ ), \"Optimizer learning rate mismatch, loading from checkpoint failed\"\n+ assert resumed_state[\"epoch\"] == starting_epoch - 1, \"Epoch mismatch, loading from checkpoint failed\"\n+ return\n+\n+ # Now we train the model\n+ state = {}\n+ for epoch in range(starting_epoch, ending_epoch):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ overall_step += 1\n+ output_dir = f\"epoch_{epoch}\"\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+ accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)\n+ state[\"accuracy\"] = accuracy\n+ state[\"lr\"] = lr_scheduler.get_lr()[0]\n+ state[\"optimizer_lr\"] = optimizer.param_groups[0][\"lr\"]\n+ state[\"epoch\"] = epoch\n+ state[\"step\"] = overall_step\n+ accelerator.print(f\"epoch {epoch}:\", state)\n+\n+ accelerator.wait_for_everyone()\n+ if accelerator.is_main_process:\n+ with open(os.path.join(args.output_dir, f\"state_{epoch}.json\"), \"w\") as f:\n+ json.dump(state, f)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script tracking peak GPU memory usage.\")\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ default=\"bert-base-cased\",\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=False,\n+ )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. Default is the current working directory.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ parser.add_argument(\n+ \"--partial_train_epoch\",\n+ type=int,\n+ default=None,\n+ help=\"If passed, the training will stop after this number of epochs.\",\n+ )\n+ parser.add_argument(\n+ \"--num_epochs\",\n+ type=int,\n+ default=2,\n+ help=\"Number of train epochs.\",\n+ )\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": args.num_epochs, \"seed\": 42, \"batch_size\": 16}\n+\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/test_utils/scripts/test_peak_memory_usage.py b/src/accelerate/test_utils/scripts/test_peak_memory_usage.py\nnew file mode 100644\nindex 000000000..7bb5ca3bf\n--- /dev/null\n+++ b/src/accelerate/test_utils/scripts/test_peak_memory_usage.py\n@@ -0,0 +1,258 @@\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import gc\n+import json\n+import os\n+\n+import torch\n+from torch.optim import AdamW\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler\n+from datasets import load_dataset\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+# Converting Bytes to Megabytes\n+def b2mb(x):\n+ return int(x / 2**20)\n+\n+\n+# This context manager is used to track the peak memory usage of the process\n+class TorchTracemalloc:\n+ def __enter__(self):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero\n+ self.begin = torch.cuda.memory_allocated()\n+ return self\n+\n+ def __exit__(self, *exc):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ self.end = torch.cuda.memory_allocated()\n+ self.peak = torch.cuda.max_memory_allocated()\n+ self.used = b2mb(self.end - self.begin)\n+ self.peaked = b2mb(self.peak - self.begin)\n+ # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n+\n+\n+def get_dataloaders(\n+ accelerator: Accelerator,\n+ batch_size: int = 16,\n+ model_name: str = \"bert-base-cased\",\n+ n_train: int = 320,\n+ n_val: int = 160,\n+):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ model_name (`str`, *optional*):\n+ The name of the model to use.\n+ n_train (`int`, *optional*):\n+ The number of training examples to use.\n+ n_val (`int`, *optional*):\n+ The number of validation examples to use.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(model_name)\n+ datasets = load_dataset(\n+ \"glue\", \"mrpc\", split={\"train\": f\"train[:{n_train}]\", \"validation\": f\"validation[:{n_val}]\"}\n+ )\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function, batched=True, remove_columns=[\"idx\", \"sentence1\", \"sentence2\"], load_from_cache_file=False\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return 
tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+ model_name = args.model_name_or_path\n+\n+ set_seed(seed)\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)\n+\n+ # Instantiate optimizer\n+ optimizer_cls = (\n+ AdamW\n+ if accelerator.state.deepspeed_plugin is None\n+ or \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ else DummyOptim\n+ )\n+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)\n+\n+ if accelerator.state.deepspeed_plugin is not None:\n+ gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[\n+ \"gradient_accumulation_steps\"\n+ ]\n+ else:\n+ gradient_accumulation_steps = 1\n+ max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps\n+\n+ # Instantiate scheduler\n+ if (\n+ accelerator.state.deepspeed_plugin is None\n+ or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ ):\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=0,\n+ num_training_steps=max_training_steps,\n+ )\n+ else:\n+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # We need to keep track of how many total steps we have iterated over\n+ overall_step = 0\n+ # We also need to keep track of the stating epoch so files are named properly\n+ starting_epoch = 0\n+\n+ # Now we train the model\n+ train_total_peak_memory = {}\n+ for epoch in range(starting_epoch, num_epochs):\n+ with TorchTracemalloc() as tracemalloc:\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ overall_step += 1\n+\n+ # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage\n+ accelerator.print(\"Memory before entering the train : {}\".format(b2mb(tracemalloc.begin)))\n+ accelerator.print(\"Memory consumed at the end of the train 
(end-begin): {}\".format(tracemalloc.used))\n+ accelerator.print(\"Peak Memory consumed during the train (max-begin): {}\".format(tracemalloc.peaked))\n+ accelerator.print(\n+ \"Total Peak Memory consumed during the train (max): {}\".format(\n+ tracemalloc.peaked + b2mb(tracemalloc.begin)\n+ )\n+ )\n+ train_total_peak_memory[f\"epoch-{epoch}\"] = tracemalloc.peaked + b2mb(tracemalloc.begin)\n+ if args.peak_memory_upper_bound is not None:\n+ assert (\n+ train_total_peak_memory[f\"epoch-{epoch}\"] <= args.peak_memory_upper_bound\n+ ), \"Peak memory usage exceeded the upper bound\"\n+\n+ accelerator.wait_for_everyone()\n+ if accelerator.is_main_process:\n+ with open(os.path.join(args.output_dir, \"peak_memory_utilization.json\"), \"w\") as f:\n+ json.dump(train_total_peak_memory, f)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script tracking peak GPU memory usage.\")\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ default=\"bert-base-cased\",\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=False,\n+ )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. Default is the current working directory.\",\n+ )\n+ parser.add_argument(\n+ \"--peak_memory_upper_bound\",\n+ type=float,\n+ default=None,\n+ help=\"The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.\",\n+ )\n+ parser.add_argument(\n+ \"--n_train\",\n+ type=int,\n+ default=320,\n+ help=\"Number of training examples to use.\",\n+ )\n+ parser.add_argument(\n+ \"--n_val\",\n+ type=int,\n+ default=160,\n+ help=\"Number of validation examples to use.\",\n+ )\n+ parser.add_argument(\n+ \"--num_epochs\",\n+ type=int,\n+ default=1,\n+ help=\"Number of train epochs.\",\n+ )\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": args.num_epochs, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/test_utils/scripts/test_performance.py b/src/accelerate/test_utils/scripts/test_performance.py\nnew file mode 100644\nindex 000000000..324a1854e\n--- /dev/null\n+++ b/src/accelerate/test_utils/scripts/test_performance.py\n@@ -0,0 +1,231 @@\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import json\n+import os\n+\n+import torch\n+from torch.optim import AdamW\n+from torch.utils.data import DataLoader\n+\n+import evaluate\n+from accelerate import Accelerator, DistributedType\n+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler\n+from datasets import load_dataset\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = \"bert-base-cased\"):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ model_name (`str`, *optional*):\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(model_name)\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function, batched=True, remove_columns=[\"idx\", \"sentence1\", \"sentence2\"], load_from_cache_file=False\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+ model_name = args.model_name_or_path\n+\n+ set_seed(seed)\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)\n+\n+ # Instantiate the model (we build the model here so 
that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)\n+\n+ # Instantiate optimizer\n+ optimizer_cls = (\n+ AdamW\n+ if accelerator.state.deepspeed_plugin is None\n+ or \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ else DummyOptim\n+ )\n+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)\n+\n+ if accelerator.state.deepspeed_plugin is not None:\n+ gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[\n+ \"gradient_accumulation_steps\"\n+ ]\n+ else:\n+ gradient_accumulation_steps = 1\n+ max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps\n+\n+ # Instantiate scheduler\n+ if (\n+ accelerator.state.deepspeed_plugin is None\n+ or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ ):\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=0,\n+ num_training_steps=max_training_steps,\n+ )\n+ else:\n+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # We need to keep track of how many total steps we have iterated over\n+ overall_step = 0\n+ # We also need to keep track of the stating epoch so files are named properly\n+ starting_epoch = 0\n+\n+ # Now we train the model\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n+ best_performance = 0\n+ performance_metric = {}\n+ for epoch in range(starting_epoch, num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ overall_step += 1\n+\n+ model.eval()\n+ samples_seen = 0\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather(\n+ (predictions, batch[\"labels\"])\n+ ) # If we are in a multiprocess environment, the last batch has duplicates\n+ if accelerator.use_distributed:\n+ if step == len(eval_dataloader) - 1:\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ samples_seen += references.shape[0]\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+ performance_metric[f\"epoch-{epoch}\"] = eval_metric[\"accuracy\"]\n+\n+ if best_performance < eval_metric[\"accuracy\"]:\n+ best_performance = eval_metric[\"accuracy\"]\n+\n+ if args.performance_lower_bound is not None:\n+ assert (\n+ args.performance_lower_bound <= best_performance\n+ ), f\"Best 
performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}\"\n+\n+ accelerator.wait_for_everyone()\n+ if accelerator.is_main_process:\n+ with open(os.path.join(args.output_dir, \"all_results.json\"), \"w\") as f:\n+ json.dump(performance_metric, f)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script tracking model performance.\")\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ default=\"bert-base-cased\",\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=False,\n+ )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. Default is the current working directory.\",\n+ )\n+ parser.add_argument(\n+ \"--performance_lower_bound\",\n+ type=float,\n+ default=None,\n+ help=\"Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.\",\n+ )\n+ parser.add_argument(\n+ \"--num_epochs\",\n+ type=int,\n+ default=3,\n+ help=\"Number of train epochs.\",\n+ )\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": args.num_epochs, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 0b3145070..b5cccc1ff 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -32,6 +32,7 @@\n is_comet_ml_available,\n is_deepspeed_available,\n is_tensorboard_available,\n+ is_torch_version,\n is_tpu_available,\n is_wandb_available,\n )\n@@ -108,6 +109,13 @@ def require_deepspeed(test_case):\n return unittest.skipUnless(is_deepspeed_available(), \"test requires DeepSpeed\")(test_case)\n \n \n+def require_fsdp(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires FSDP installed. These tests are skipped when FSDP isn't installed\n+ \"\"\"\n+ return unittest.skipUnless(is_torch_version(\">=\", \"1.12.0\"), \"test requires torch version >= 1.12.0\")(test_case)\n+\n+\n def require_tensorboard(test_case):\n \"\"\"\n Decorator marking a test that requires tensorboard installed. 
These tests are skipped when tensorboard isn't\ndiff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\nindex 00556cad6..4e7c71853 100644\n--- a/src/accelerate/utils/constants.py\n+++ b/src/accelerate/utils/constants.py\n@@ -27,6 +27,7 @@\n FSDP_SHARDING_STRATEGY = [\"FULL_SHARD\", \"SHARD_GRAD_OP\", \"NO_SHARD\"]\n FSDP_AUTO_WRAP_POLICY = [\"TRANSFORMER_BASED_WRAP\", \"SIZE_BASED_WRAP\", \"NO_WRAP\"]\n FSDP_BACKWARD_PREFETCH = [\"BACKWARD_PRE\", \"BACKWARD_POST\", \"NO_PREFETCH\"]\n+FSDP_STATE_DICT_TYPE = [\"FULL_STATE_DICT\", \"LOCAL_STATE_DICT\", \"SHARDED_STATE_DICT\"]\n DEEPSPEED_MULTINODE_LAUNCHERS = [\"pdsh\", \"standard\", \"openmpi\", \"mvapich\"]\n \n STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 8c8bdf106..350d95320 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -28,7 +28,7 @@\n \n import torch\n \n-from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH\n+from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE, MODEL_NAME, OPTIMIZER_NAME\n \n \n class KwargsHandler:\n@@ -455,8 +455,28 @@ class FullyShardedDataParallelPlugin:\n metadata={\"help\": \"A list of modules to ignore for FSDP.\"},\n )\n \n+ state_dict_type: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"FSDP State Dict Type of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictType`\"\n+ },\n+ )\n+\n+ state_dict_config: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"FSDP State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictConfig`\"\n+ },\n+ )\n+\n def __post_init__(self):\n- from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import (\n+ BackwardPrefetch,\n+ CPUOffload,\n+ ShardingStrategy,\n+ StateDictType,\n+ _state_dict_type_to_config,\n+ )\n \n if self.sharding_strategy is None:\n self.sharding_strategy = ShardingStrategy(int(os.environ.get(\"FSDP_SHARDING_STRATEGY\", 1)))\n@@ -468,10 +488,21 @@ def __post_init__(self):\n self.cpu_offload = CPUOffload(offload_params=False)\n \n if self.backward_prefetch is None:\n- prefetch_policy = os.environ.get(\"FSDP_BACKWARD_PREFETCH\", FSDP_BACKWARD_PREFETCH[-1])\n+ prefetch_policy = os.environ.get(\"FSDP_BACKWARD_PREFETCH\", \"NO_PREFETCH\")\n if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:\n self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)\n \n+ if self.state_dict_type is None:\n+ state_dict_type_policy = os.environ.get(\"FSDP_STATE_DICT_TYPE\", \"FULL_STATE_DICT\")\n+ self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)\n+\n+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:\n+ self.state_dict_config = _state_dict_type_to_config[self.state_dict_type](\n+ offload_to_cpu=True, rank0_only=True\n+ )\n+ else:\n+ self.state_dict_config = _state_dict_type_to_config[self.state_dict_type]()\n+\n @staticmethod\n def get_module_class_from_name(module, name):\n \"\"\"\n@@ -496,7 +527,7 @@ def set_auto_wrap_policy(self, model):\n from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy\n \n if self.auto_wrap_policy is None:\n- auto_wrap_policy = 
os.environ.get(\"FSDP_AUTO_WRAP_POLICY\", FSDP_AUTO_WRAP_POLICY[-1])\n+ auto_wrap_policy = os.environ.get(\"FSDP_AUTO_WRAP_POLICY\", \"NO_WRAP\")\n if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:\n transformer_cls_to_wrap = os.environ.get(\"FSDP_TRANSFORMER_CLS_TO_WRAP\", \"\")\n transformer_cls_to_wrap = FullyShardedDataParallelPlugin.get_module_class_from_name(\n@@ -527,3 +558,84 @@ def set_mixed_precision(self, mixed_precision):\n \n if self.mixed_precision_policy is None:\n self.mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)\n+\n+ def save_model(self, accelerator, model, output_dir, model_index=0):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\n+\n+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:\n+ with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\n+ state_dict = model.state_dict()\n+ weights_name = f\"{MODEL_NAME}.bin\" if model_index == 0 else f\"{MODEL_NAME}_{model_index}.bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ if accelerator.process_index == 0:\n+ print(f\"Saving model to {output_model_file}\")\n+ torch.save(state_dict, output_model_file)\n+ print(f\"Model saved to {output_model_file}\")\n+ else:\n+ with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\n+ state_dict = model.state_dict()\n+ weights_name = (\n+ f\"{MODEL_NAME}_rank{accelerator.process_index}.bin\"\n+ if model_index == 0\n+ else f\"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin\"\n+ )\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ print(f\"Saving model to {output_model_file}\")\n+ torch.save(state_dict, output_model_file)\n+ print(f\"Model saved to {output_model_file}\")\n+\n+ def load_model(self, accelerator, model, input_dir, model_index=0):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\n+\n+ accelerator.wait_for_everyone()\n+\n+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:\n+ weights_name = f\"{MODEL_NAME}.bin\" if model_index == 0 else f\"{MODEL_NAME}_{model_index}.bin\"\n+ input_model_file = os.path.join(input_dir, weights_name)\n+ accelerator.print(f\"Loading model from {input_model_file}\")\n+ state_dict = torch.load(input_model_file)\n+ accelerator.print(f\"Model loaded from {input_model_file}\")\n+ else:\n+ weights_name = (\n+ f\"{MODEL_NAME}_rank{accelerator.process_index}.bin\"\n+ if model_index == 0\n+ else f\"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin\"\n+ )\n+ input_model_file = os.path.join(input_dir, weights_name)\n+ print(f\"Loading model from {input_model_file}\")\n+ state_dict = torch.load(input_model_file)\n+ print(f\"Model loaded from {input_model_file}\")\n+ with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):\n+ model.load_state_dict(state_dict)\n+\n+ def save_optimizer(self, accelerator, optimizer, model, output_dir, optimizer_index=0, optim_input=None):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+\n+ optim_state = FSDP.full_optim_state_dict(model, optimizer, optim_input=optim_input)\n+ if accelerator.process_index == 0:\n+ optim_state_name = (\n+ f\"{OPTIMIZER_NAME}.bin\" if optimizer_index == 0 else 
f\"{OPTIMIZER_NAME}_{optimizer_index}.bin\"\n+ )\n+ output_optimizer_file = os.path.join(output_dir, optim_state_name)\n+ print(f\"Saving Optimizer state to {output_optimizer_file}\")\n+ torch.save(optim_state, output_optimizer_file)\n+ print(f\"Optimizer state saved in {output_optimizer_file}\")\n+\n+ def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_index=0):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+\n+ accelerator.wait_for_everyone()\n+ full_osd = None\n+ if accelerator.process_index == 0:\n+ optimizer_name = (\n+ f\"{OPTIMIZER_NAME}.bin\" if optimizer_index == 0 else f\"{OPTIMIZER_NAME}_{optimizer_index}.bin\"\n+ )\n+ input_optimizer_file = os.path.join(input_dir, optimizer_name)\n+ print(f\"Loading Optimizer state from {input_optimizer_file}\")\n+ full_osd = torch.load(input_optimizer_file)\n+ print(f\"Optimizer state loaded from {input_optimizer_file}\")\n+ # called from all ranks, though only rank0 has a valid param for full_osd\n+ sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model)\n+ optimizer.load_state_dict(sharded_osd)\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex 7b3c8de5a..42868a0a5 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -254,7 +254,11 @@ def gather_object(object: Any):\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n raise NotImplementedError(\"gather objects in TPU is not supported\")\n- elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n+ elif AcceleratorState().distributed_type in [\n+ DistributedType.DEEPSPEED,\n+ DistributedType.MULTI_GPU,\n+ DistributedType.FSDP,\n+ ]:\n return _gpu_gather_object(object)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _cpu_gather_object(object)\n@@ -293,7 +297,11 @@ def broadcast(tensor, from_process: int = 0):\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_broadcast(tensor, src=from_process, name=\"accelerate.utils.broadcast\")\n- elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n+ elif AcceleratorState().distributed_type in [\n+ DistributedType.DEEPSPEED,\n+ DistributedType.MULTI_GPU,\n+ DistributedType.FSDP,\n+ ]:\n return _gpu_broadcast(tensor, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _gpu_broadcast(tensor, src=from_process)\n@@ -317,7 +325,11 @@ def broadcast_object_list(object_list, from_process: int = 0):\n if AcceleratorState().distributed_type == DistributedType.TPU:\n for i, obj in enumerate(object_list):\n object_list[i] = xm.mesh_reduce(\"accelerate.utils.broadcast_object_list\", obj, lambda x: x[from_process])\n- elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n+ elif AcceleratorState().distributed_type in [\n+ DistributedType.DEEPSPEED,\n+ DistributedType.MULTI_GPU,\n+ DistributedType.FSDP,\n+ ]:\n torch.distributed.broadcast_object_list(object_list, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n torch.distributed.broadcast_object_list(object_list, src=from_process)\n@@ -433,7 +445,11 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\n if state.distributed_type == DistributedType.TPU:\n xm.all_reduce(\"sum\", cloned_tensor)\n return cloned_tensor\n- elif 
state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n+ elif state.distributed_type in [\n+ DistributedType.DEEPSPEED,\n+ DistributedType.MULTI_GPU,\n+ DistributedType.FSDP,\n+ ]:\n torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)\n return cloned_tensor\n else:\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\nindex 206c70589..ff360038d 100644\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -65,6 +65,7 @@ def wait_for_everyone():\n AcceleratorState().distributed_type == DistributedType.MULTI_GPU\n or AcceleratorState().distributed_type == DistributedType.MULTI_CPU\n or AcceleratorState().distributed_type == DistributedType.DEEPSPEED\n+ or AcceleratorState().distributed_type == DistributedType.FSDP\n ):\n torch.distributed.barrier()\n elif AcceleratorState().distributed_type == DistributedType.TPU:\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex 12cca415c..6b37eb93e 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -25,10 +25,18 @@\n import torch\n from torch.utils.data import DataLoader\n \n+import accelerate\n from accelerate.accelerator import Accelerator\n from accelerate.scheduler import AcceleratedScheduler\n from accelerate.state import AcceleratorState\n-from accelerate.test_utils.testing import require_cuda, require_deepspeed\n+from accelerate.test_utils.testing import (\n+ TempDirTestCase,\n+ execute_subprocess_async,\n+ require_cuda,\n+ require_deepspeed,\n+ require_multi_gpu,\n+ slow,\n+)\n from accelerate.test_utils.training import RegressionDataset\n from accelerate.utils.dataclasses import DeepSpeedPlugin\n from accelerate.utils.deepspeed import (\n@@ -38,6 +46,7 @@\n DummyOptim,\n DummyScheduler,\n )\n+from accelerate.utils.other import patch_environment\n from parameterized import parameterized\n from transformers import AutoModel, AutoModelForCausalLM, get_scheduler\n from transformers.testing_utils import mockenv_context\n@@ -118,6 +127,10 @@ def setUp(self):\n WORLD_SIZE=\"1\",\n )\n \n+ def tearDown(self):\n+ super().tearDown()\n+ AcceleratorState._reset_state()\n+\n def get_config_dict(self, stage):\n # As some tests modify the dict, always make a copy\n return deepcopy(self.ds_config_dict[stage])\n@@ -260,11 +273,10 @@ def test_init_zero3(self):\n )\n \n with mockenv_context(**self.dist_env):\n- accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin) # noqa: F841\n from transformers.deepspeed import is_deepspeed_zero3_enabled\n \n self.assertTrue(is_deepspeed_zero3_enabled())\n- accelerator.state.initialized = False\n \n @parameterized.expand(optim_scheduler_params, name_func=parameterized_custom_name_func)\n def test_prepare_deepspeed(self, optim_type, scheduler_type):\n@@ -479,7 +491,6 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n \"You can only specify `accelerate.utils.DummyScheduler` in the code when using `accelerate.utils.DummyOptim`.\"\n in str(cm.exception)\n )\n- accelerator.state.initialized = False\n \n def test_save_checkpoints(self):\n deepspeed_plugin = DeepSpeedPlugin(\n@@ -533,7 +544,6 @@ def test_save_checkpoints(self):\n \"To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights.\"\n )\n self.assertTrue(msg in str(cm.exception))\n- accelerator.state.initialized = False\n \n def test_autofill_dsconfig(self):\n deepspeed_plugin = 
DeepSpeedPlugin(\n@@ -581,4 +591,213 @@ def test_autofill_dsconfig(self):\n self.assertFalse(\n accelerator.deepspeed_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"]\n )\n- accelerator.state.initialized = False\n+\n+ def test_basic_run(self):\n+ mod_file = inspect.getfile(accelerate.test_utils)\n+ test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_performance.py\"])\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ cmd = [\n+ \"accelerate\",\n+ \"launch\",\n+ \"--num_processes=1\",\n+ \"--num_machines=1\",\n+ \"--machine_rank=0\",\n+ \"--mixed_precision=fp16\",\n+ \"--use_deepspeed\",\n+ \"--gradient_accumulation_steps=1\",\n+ \"--zero_stage=2\",\n+ \"--offload_optimizer_device=none\",\n+ \"--offload_param_device=none\",\n+ test_file_path,\n+ \"--model_name_or_path=distilbert-base-uncased\",\n+ \"--num_epochs=1\",\n+ f\"--output_dir={dirpath}\",\n+ ]\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd, env=os.environ.copy())\n+\n+\n+@require_deepspeed\n+@require_multi_gpu\n+@slow\n+class DeepSpeedIntegrationTest(TempDirTestCase):\n+ def setUp(self):\n+ super().setUp()\n+ self._test_file_path = inspect.getfile(self.__class__)\n+ path = Path(self._test_file_path).resolve()\n+ self.test_file_dir_str = str(path.parents[0])\n+\n+ self.ds_config_file = dict(\n+ zero2=f\"{self.test_file_dir_str}/ds_config_zero2.json\",\n+ zero3=f\"{self.test_file_dir_str}/ds_config_zero3.json\",\n+ )\n+\n+ self.stages = [1, 2, 3]\n+ self.zero3_offload_config = False\n+ self.performance_lower_bound = 0.83\n+ self.peak_memory_usage_upper_bound = {\n+ \"multi_gpu_fp16\": 3200,\n+ \"deepspeed_stage_1_fp16\": 1600,\n+ \"deepspeed_stage_2_fp16\": 2500,\n+ \"deepspeed_stage_3_zero_init_fp16\": 2800,\n+ \"deepspeed_stage_3_cpu_offload_fp16\": 1900,\n+ }\n+ self.n_train = 160\n+ self.n_val = 160\n+\n+ mod_file = inspect.getfile(accelerate.test_utils)\n+ self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\"])\n+\n+ def test_performance(self):\n+ self.test_file_path = os.path.join(self.test_scripts_folder, \"test_performance.py\")\n+ cmd = [\n+ \"accelerate\",\n+ \"launch\",\n+ \"--num_processes=2\",\n+ \"--num_machines=1\",\n+ \"--machine_rank=0\",\n+ \"--mixed_precision=fp16\",\n+ \"--use_deepspeed\",\n+ \"--gradient_accumulation_steps=1\",\n+ \"--gradient_clipping=1\",\n+ \"--zero3_init_flag=True\",\n+ \"--zero3_save_16bit_model=True\",\n+ ]\n+ for stage in self.stages:\n+ if stage == 1:\n+ continue\n+ cmd_stage = cmd.copy()\n+ cmd_stage.extend([f\"--zero_stage={stage}\"])\n+ cmd_stage.extend([\"--offload_optimizer_device=none\", \"--offload_param_device=none\"])\n+ if self.zero3_offload_config:\n+ with io.open(self.ds_config_file[ZERO3], \"r\", encoding=\"utf-8\") as f:\n+ ds_config = json.load(f)\n+ del ds_config[\"bf16\"]\n+ del ds_config[\"optimizer\"][\"params\"][\"torch_adam\"]\n+ del ds_config[\"optimizer\"][\"params\"][\"adam_w_mode\"]\n+ ds_config[\"fp16\"][\"enabled\"] = True\n+ ds_config_path = os.path.join(self.tmpdir, \"ds_config.json\")\n+ with open(ds_config_path, \"w\") as out_file:\n+ json.dump(ds_config, out_file)\n+\n+ cmd_stage.extend([f\"--deepspeed_config_file={ds_config_path}\"])\n+\n+ cmd_stage.extend(\n+ [\n+ self.test_file_path,\n+ f\"--output_dir={self.tmpdir}\",\n+ f\"--performance_lower_bound={self.performance_lower_bound}\",\n+ ]\n+ )\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd_stage, env=os.environ.copy())\n+\n+ def 
test_checkpointing(self):\n+ self.test_file_path = os.path.join(self.test_scripts_folder, \"test_checkpointing.py\")\n+ cmd = [\n+ \"accelerate\",\n+ \"launch\",\n+ \"--num_processes=2\",\n+ \"--num_machines=1\",\n+ \"--machine_rank=0\",\n+ \"--mixed_precision=fp16\",\n+ \"--use_deepspeed\",\n+ \"--gradient_accumulation_steps=1\",\n+ \"--gradient_clipping=1\",\n+ \"--zero3_init_flag=True\",\n+ \"--zero3_save_16bit_model=True\",\n+ ]\n+ for stage in self.stages:\n+ if stage == 1:\n+ continue\n+ cmd_stage = cmd.copy()\n+ cmd_stage.extend([f\"--zero_stage={stage}\"])\n+ cmd_stage.extend([\"--offload_optimizer_device=none\", \"--offload_param_device=none\"])\n+ if self.zero3_offload_config:\n+ with io.open(self.ds_config_file[ZERO3], \"r\", encoding=\"utf-8\") as f:\n+ ds_config = json.load(f)\n+ del ds_config[\"bf16\"]\n+ del ds_config[\"optimizer\"][\"params\"][\"torch_adam\"]\n+ del ds_config[\"optimizer\"][\"params\"][\"adam_w_mode\"]\n+ ds_config[\"fp16\"][\"enabled\"] = True\n+ ds_config_path = os.path.join(self.tmpdir, \"ds_config.json\")\n+ with open(ds_config_path, \"w\") as out_file:\n+ json.dump(ds_config, out_file)\n+\n+ cmd_stage.extend([f\"--deepspeed_config_file={ds_config_path}\"])\n+\n+ cmd_stage.extend(\n+ [\n+ self.test_file_path,\n+ f\"--output_dir={self.tmpdir}\",\n+ \"--partial_train_epoch=1\",\n+ ]\n+ )\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd_stage, env=os.environ.copy())\n+\n+ cmd_stage = cmd_stage[:-1]\n+ resume_from_checkpoint = os.path.join(self.tmpdir, \"epoch_0\")\n+ cmd_stage.extend(\n+ [\n+ f\"--resume_from_checkpoint={resume_from_checkpoint}\",\n+ ]\n+ )\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd_stage, env=os.environ.copy())\n+\n+ def test_peak_memory_usage(self):\n+ self.test_file_path = os.path.join(self.test_scripts_folder, \"test_peak_memory_usage.py\")\n+ cmd = [\n+ \"accelerate\",\n+ \"launch\",\n+ \"--num_processes=2\",\n+ \"--num_machines=1\",\n+ \"--machine_rank=0\",\n+ ]\n+ for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():\n+ cmd_stage = cmd.copy()\n+ if \"fp16\" in spec:\n+ cmd_stage.extend([\"--mixed_precision=fp16\"])\n+\n+ if \"multi_gpu\" in spec:\n+ continue\n+ else:\n+ cmd_stage.extend(\n+ [\n+ \"--use_deepspeed\",\n+ \"--gradient_accumulation_steps=1\",\n+ \"--gradient_clipping=1\",\n+ \"--zero3_init_flag=True\",\n+ \"--zero3_save_16bit_model=True\",\n+ ]\n+ )\n+ for i in range(3):\n+ if f\"stage_{i+1}\" in spec:\n+ cmd_stage.extend([f\"--zero_stage={i+1}\"])\n+ break\n+ cmd_stage.extend([\"--offload_optimizer_device=none\", \"--offload_param_device=none\"])\n+ if \"cpu_offload\" in spec:\n+ with io.open(self.ds_config_file[ZERO3], \"r\", encoding=\"utf-8\") as f:\n+ ds_config = json.load(f)\n+ del ds_config[\"bf16\"]\n+ del ds_config[\"fp16\"]\n+ del ds_config[\"optimizer\"][\"params\"][\"torch_adam\"]\n+ del ds_config[\"optimizer\"][\"params\"][\"adam_w_mode\"]\n+ ds_config_path = os.path.join(self.tmpdir, \"ds_config.json\")\n+ with open(ds_config_path, \"w\") as out_file:\n+ json.dump(ds_config, out_file)\n+\n+ cmd_stage.extend([f\"--deepspeed_config_file={ds_config_path}\"])\n+\n+ cmd_stage.extend(\n+ [\n+ self.test_file_path,\n+ f\"--output_dir={self.tmpdir}\",\n+ f\"--peak_memory_upper_bound={peak_mem_upper_bound}\",\n+ f\"--n_train={self.n_train}\",\n+ f\"--n_val={self.n_val}\",\n+ ]\n+ )\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd_stage, env=os.environ.copy())\ndiff --git a/tests/fsdp/test_fsdp.py 
b/tests/fsdp/test_fsdp.py\nnew file mode 100644\nindex 000000000..8ad088c04\n--- /dev/null\n+++ b/tests/fsdp/test_fsdp.py\n@@ -0,0 +1,332 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\n+import inspect\n+import os\n+import unittest\n+\n+import torch\n+\n+import accelerate\n+from accelerate.accelerator import Accelerator\n+from accelerate.state import AcceleratorState\n+from accelerate.test_utils.testing import (\n+ TempDirTestCase,\n+ execute_subprocess_async,\n+ require_cuda,\n+ require_fsdp,\n+ require_multi_gpu,\n+ slow,\n+)\n+from accelerate.utils.constants import (\n+ FSDP_AUTO_WRAP_POLICY,\n+ FSDP_BACKWARD_PREFETCH,\n+ FSDP_SHARDING_STRATEGY,\n+ FSDP_STATE_DICT_TYPE,\n+)\n+from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin\n+from accelerate.utils.other import patch_environment\n+from transformers import AutoModel\n+from transformers.testing_utils import mockenv_context\n+from transformers.trainer_utils import set_seed\n+\n+\n+set_seed(42)\n+\n+BERT_BASE_CASED = \"bert-base-cased\"\n+FP16 = \"fp16\"\n+BF16 = \"bf16\"\n+dtypes = [FP16, BF16]\n+\n+\n+@require_fsdp\n+@require_cuda\n+class FSDPPluginIntegration(unittest.TestCase):\n+ def setUp(self):\n+ super().setUp()\n+\n+ self.dist_env = dict(\n+ USE_FSDP=\"true\",\n+ MASTER_ADDR=\"localhost\",\n+ MASTER_PORT=\"10999\",\n+ RANK=\"0\",\n+ LOCAL_RANK=\"0\",\n+ WORLD_SIZE=\"1\",\n+ )\n+\n+ def tearDown(self):\n+ super().tearDown()\n+ AcceleratorState._reset_state()\n+\n+ def test_sharding_strategy(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy\n+\n+ for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n+ env = self.dist_env.copy()\n+ env[\"FSDP_SHARDING_STRATEGY\"] = f\"{i + 1}\"\n+ env[\"FSDP_SHARDING_STRATEGY_NAME\"] = strategy\n+ with mockenv_context(**env):\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))\n+\n+ def test_backward_prefetch(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch\n+\n+ for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):\n+ env = self.dist_env.copy()\n+ env[\"FSDP_BACKWARD_PREFETCH\"] = prefetch_policy\n+ with mockenv_context(**env):\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ if prefetch_policy == \"NO_PREFETCH\":\n+ self.assertIsNone(fsdp_plugin.backward_prefetch)\n+ else:\n+ self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))\n+\n+ def test_state_dict_type(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType, _state_dict_type_to_config\n+\n+ for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n+ env = self.dist_env.copy()\n+ env[\"FSDP_STATE_DICT_TYPE\"] = state_dict_type\n+ with mockenv_context(**env):\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))\n+ self.assertEqual(\n+ 
type(fsdp_plugin.state_dict_config), type(_state_dict_type_to_config[StateDictType(i + 1)]())\n+ )\n+ if state_dict_type == \"FULL_STATE_DICT\":\n+ self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)\n+ self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)\n+\n+ def test_auto_wrap_policy(self):\n+ model = AutoModel.from_pretrained(BERT_BASE_CASED)\n+ for policy in FSDP_AUTO_WRAP_POLICY:\n+ env = self.dist_env.copy()\n+ env[\"FSDP_AUTO_WRAP_POLICY\"] = policy\n+ if policy == \"TRANSFORMER_BASED_WRAP\":\n+ env[\"FSDP_TRANSFORMER_CLS_TO_WRAP\"] = \"BertLayer\"\n+ elif policy == \"SIZE_BASED_WRAP\":\n+ env[\"FSDP_MIN_NUM_PARAMS\"] = \"2000\"\n+ with mockenv_context(**env):\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ fsdp_plugin.set_auto_wrap_policy(model)\n+ if policy == \"NO_WRAP\":\n+ self.assertIsNone(fsdp_plugin.auto_wrap_policy)\n+ else:\n+ self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)\n+\n+ env = self.dist_env.copy()\n+ env[\"FSDP_AUTO_WRAP_POLICY\"] = \"TRANSFORMER_BASED_WRAP\"\n+ env[\"FSDP_TRANSFORMER_CLS_TO_WRAP\"] = \"T5Layer\"\n+ with mockenv_context(**env):\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ with self.assertRaises(Exception) as cm:\n+ fsdp_plugin.set_auto_wrap_policy(model)\n+ self.assertTrue(\"Could not find the transformer layer class to wrap in the model.\" in str(cm.exception))\n+\n+ env = self.dist_env.copy()\n+ env[\"FSDP_AUTO_WRAP_POLICY\"] = \"SIZE_BASED_WRAP\"\n+ env[\"FSDP_MIN_NUM_PARAMS\"] = \"0\"\n+ with mockenv_context(**env):\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ fsdp_plugin.set_auto_wrap_policy(model)\n+ self.assertIsNone(fsdp_plugin.auto_wrap_policy)\n+\n+ def test_mixed_precision(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision\n+ from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n+\n+ for mp_dtype in dtypes:\n+ env = self.dist_env.copy()\n+ env[\"MIXED_PRECISION\"] = mp_dtype\n+ with mockenv_context(**env):\n+ accelerator = Accelerator()\n+ if mp_dtype == \"fp16\":\n+ dtype = torch.float16\n+ elif mp_dtype == \"bf16\":\n+ dtype = torch.bfloat16\n+ mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)\n+ self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)\n+ if mp_dtype == FP16:\n+ self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))\n+ elif mp_dtype == BF16:\n+ self.assertIsNone(accelerator.scaler)\n+ AcceleratorState._reset_state()\n+\n+ def test_cpu_offload(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload\n+\n+ for flag in [True, False]:\n+ env = self.dist_env.copy()\n+ env[\"FSDP_OFFLOAD_PARAMS\"] = str(flag).lower()\n+ with mockenv_context(**env):\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))\n+\n+\n+@require_fsdp\n+@require_multi_gpu\n+@slow\n+class FSDPIntegrationTest(TempDirTestCase):\n+ def setUp(self):\n+ super().setUp()\n+ self.performance_lower_bound = 0.83\n+ self.performance_configs = [\n+ \"fsdp_shard_grad_op_transformer_based_wrap\",\n+ \"fsdp_full_shard_transformer_based_wrap\",\n+ ]\n+ self.peak_memory_usage_upper_bound = {\n+ \"multi_gpu_fp16\": 3200,\n+ \"fsdp_shard_grad_op_transformer_based_wrap_fp16\": 2000,\n+ \"fsdp_full_shard_transformer_based_wrap_fp16\": 1900,\n+ \"fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32\": 1500, # fp16 was leading to indefinite hang\n+ }\n+ self.n_train = 160\n+ self.n_val = 160\n+\n+ mod_file 
= inspect.getfile(accelerate.test_utils)\n+ self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\"])\n+\n+ def test_performance(self):\n+ self.test_file_path = os.path.join(self.test_scripts_folder, \"test_performance.py\")\n+ cmd = [\"accelerate\", \"launch\", \"--num_processes=2\", \"--num_machines=1\", \"--machine_rank=0\", \"--use_fsdp\"]\n+ for config in self.performance_configs:\n+ cmd_config = cmd.copy()\n+ for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n+ if strategy.lower() in config:\n+ cmd_config.append(f\"--fsdp_sharding_strategy={i+1}\")\n+ break\n+\n+ if \"fp32\" in config:\n+ cmd_config.append(\"--mixed_precision=no\")\n+ else:\n+ cmd_config.append(\"--mixed_precision=fp16\")\n+\n+ if \"cpu_offload\" in config:\n+ cmd_config.append(\"--fsdp_offload_params=True\")\n+\n+ for policy in FSDP_AUTO_WRAP_POLICY:\n+ if policy.lower() in config:\n+ cmd_config.append(f\"--fsdp_auto_wrap_policy={policy}\")\n+ break\n+\n+ if policy == \"TRANSFORMER_BASED_WRAP\":\n+ cmd_config.append(\"--fsdp_transformer_layer_cls_to_wrap=BertLayer\")\n+ elif policy == \"SIZE_BASED_WRAP\":\n+ cmd_config.append(\"--fsdp_min_num_params=2000\")\n+\n+ cmd_config.extend(\n+ [\n+ self.test_file_path,\n+ f\"--output_dir={self.tmpdir}\",\n+ f\"--performance_lower_bound={self.performance_lower_bound}\",\n+ ]\n+ )\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd_config, env=os.environ.copy())\n+\n+ def test_checkpointing(self):\n+ self.test_file_path = os.path.join(self.test_scripts_folder, \"test_checkpointing.py\")\n+ cmd = [\n+ \"accelerate\",\n+ \"launch\",\n+ \"--num_processes=2\",\n+ \"--num_machines=1\",\n+ \"--machine_rank=0\",\n+ \"--use_fsdp\",\n+ \"--mixed_precision=fp16\",\n+ \"--fsdp_transformer_layer_cls_to_wrap=BertLayer\",\n+ ]\n+\n+ for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n+ cmd_config = cmd.copy()\n+ cmd_config.append(f\"--fsdp_sharding_strategy={i+1}\")\n+ if strategy != \"FULL_SHARD\":\n+ continue\n+ state_dict_config_index = len(cmd_config)\n+ for state_dict_type in FSDP_STATE_DICT_TYPE:\n+ cmd_config = cmd_config[:state_dict_config_index]\n+ if state_dict_type == \"SHARDED_STATE_DICT\":\n+ continue\n+ cmd_config.append(f\"--fsdp_state_dict_type={state_dict_type}\")\n+ cmd_config.extend(\n+ [\n+ self.test_file_path,\n+ f\"--output_dir={self.tmpdir}\",\n+ \"--partial_train_epoch=1\",\n+ ]\n+ )\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd_config, env=os.environ.copy())\n+\n+ cmd_config = cmd_config[:-1]\n+ resume_from_checkpoint = os.path.join(self.tmpdir, \"epoch_0\")\n+ cmd_config.extend(\n+ [\n+ f\"--resume_from_checkpoint={resume_from_checkpoint}\",\n+ ]\n+ )\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd_config, env=os.environ.copy())\n+\n+ def test_peak_memory_usage(self):\n+ self.test_file_path = os.path.join(self.test_scripts_folder, \"test_peak_memory_usage.py\")\n+ cmd = [\n+ \"accelerate\",\n+ \"launch\",\n+ \"--num_processes=2\",\n+ \"--num_machines=1\",\n+ \"--machine_rank=0\",\n+ ]\n+ for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():\n+ cmd_config = cmd.copy()\n+ if \"fp16\" in spec:\n+ cmd_config.extend([\"--mixed_precision=fp16\"])\n+ else:\n+ cmd_config.extend([\"--mixed_precision=no\"])\n+\n+ if \"multi_gpu\" in spec:\n+ continue\n+ else:\n+ cmd_config.extend([\"--use_fsdp\"])\n+ for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n+ if strategy.lower() in spec:\n+ 
cmd_config.append(f\"--fsdp_sharding_strategy={i+1}\")\n+ break\n+\n+ if \"cpu_offload\" in spec:\n+ cmd_config.append(\"--fsdp_offload_params=True\")\n+\n+ for policy in FSDP_AUTO_WRAP_POLICY:\n+ if policy.lower() in spec:\n+ cmd_config.append(f\"--fsdp_auto_wrap_policy={policy}\")\n+ break\n+\n+ if policy == \"TRANSFORMER_BASED_WRAP\":\n+ cmd_config.append(\"--fsdp_transformer_layer_cls_to_wrap=BertLayer\")\n+ elif policy == \"SIZE_BASED_WRAP\":\n+ cmd_config.append(\"--fsdp_min_num_params=2000\")\n+\n+ cmd_config.extend(\n+ [\n+ self.test_file_path,\n+ f\"--output_dir={self.tmpdir}\",\n+ f\"--peak_memory_upper_bound={peak_mem_upper_bound}\",\n+ f\"--n_train={self.n_train}\",\n+ f\"--n_val={self.n_val}\",\n+ ]\n+ )\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd_config, env=os.environ.copy())\n", "code_comments": [ { "body": "We can't change the API of this function, it's public and has been released now.", "diff_hunk": "@@ -43,8 +44,9 @@\n \n \n def save_accelerator_state(\n+ accelerator,\n output_dir: str,\n- model_states: List[dict],\n+ models: list,", "from_author": false }, { "body": "The logic specific to the Accelerator should go in the `Accelerator.save_state` method.", "diff_hunk": "@@ -68,20 +72,40 @@ def save_accelerator_state(\n An optional gradient scaler instance to save\n \"\"\"\n # Model states\n- for i, state in enumerate(model_states):\n- weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n- output_model_file = os.path.join(output_dir, weights_name)\n- save(state, output_model_file)\n- logger.info(f\"Model weights saved in {output_model_file}\")\n+ for i, model in enumerate(models):\n+ if accelerator.distributed_type == DistributedType.FSDP:\n+ logger.info(\"Saving FSDP model\")\n+ accelerator.state.fsdp_plugin.save_model(accelerator, model, output_dir, i)\n+ logger.info(f\"FSDP Model saved to output dir {output_dir}\")\n+ elif accelerator.distributed_type == DistributedType.DEEPSPEED:\n+ logger.info(\"Saving DeepSpeed Model and Optimizer\")\n+ ckpt_id = f\"{MODEL_NAME}\" if i == 0 else f\"{MODEL_NAME}_{i}\"\n+ model.save_checkpoint(output_dir, ckpt_id)\n+ logger.info(f\"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}\")\n+ else:\n+ state = accelerator.get_state_dict(model, unwrap=False)\n+ weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")", "from_author": false }, { "body": "Same here, we can't change the API and the logic specific to the Accelerator should go in the Accelerator.", "diff_hunk": "@@ -109,11 +133,21 @@ def save_accelerator_state(\n return output_dir\n \n \n-def load_accelerator_state(input_dir, models, optimizers, schedulers, process_index, scaler=None):\n+def load_accelerator_state(\n+ accelerator,\n+ input_dir: str,\n+ models: list,\n+ optimizers: list,\n+ schedulers: list,\n+ process_index: int,\n+ scaler: GradScaler = None,\n+):", "from_author": false }, { "body": "Chaing the name will break existing configs, and also it's already in something called `FSDP` so it seems redundant to add it here.", "diff_hunk": "@@ -210,12 +211,12 @@ def get_cluster_input():\n for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n sharding_strategy_query = sharding_strategy_query[:-2] + \")? 
[1]: \"\n- fsdp_config[\"sharding_strategy\"] = _ask_field(\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(", "from_author": false }, { "body": "Let's not rely on this constant and manually put lists and defaults here. We can always add more in the future.", "diff_hunk": "@@ -250,6 +251,15 @@ def get_cluster_input():\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n default=FSDP_BACKWARD_PREFETCH[0],\n )\n+ fsdp_state_dict_type_query = \"What should be your FSDP's state dict type (\"\n+ for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n+ fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\n+ fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? [0]: \"\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\n+ fsdp_state_dict_type_query,\n+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],\n+ default=FSDP_STATE_DICT_TYPE[0],\n+ )", "from_author": false }, { "body": "This and all below is breaking. We should at least have one deprecation cycle with the old names.", "diff_hunk": "@@ -141,19 +141,19 @@ def launch_command_parser(subparsers=None):\n help=\"Whether to use fsdp.\",\n )\n parser.add_argument(\n- \"--offload_params\",\n+ \"--fsdp_offload_params\",", "from_author": false }, { "body": "Let's write a default here to not rely on the order of this constant.", "diff_hunk": "@@ -472,6 +492,17 @@ def __post_init__(self):\n if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:\n self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)\n \n+ if self.state_dict_type is None:\n+ state_dict_type_policy = os.environ.get(\"FSDP_STATE_DICT_TYPE\", FSDP_STATE_DICT_TYPE[0])", "from_author": false }, { "body": "Hello, had to do this so that launcher has arguments with `fsdp` prefix. I thought this was fair game as we specified FSDP integration was experimental given that FSDP itself has changed a lot. This was more around readability/grouping for folks to easily understand all the variables necessary for FSDP as there are a lot more now. If you think we can do away with it, I will revert the variable name changes.", "diff_hunk": "@@ -210,12 +211,12 @@ def get_cluster_input():\n for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\n- fsdp_config[\"sharding_strategy\"] = _ask_field(\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(", "from_author": true }, { "body": "Hello, we maintain that constant in our repo at `accelerate.utils.constants.py` and hence I don't think this would be an issue. ", "diff_hunk": "@@ -250,6 +251,15 @@ def get_cluster_input():\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n default=FSDP_BACKWARD_PREFETCH[0],\n )\n+ fsdp_state_dict_type_query = \"What should be your FSDP's state dict type (\"\n+ for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n+ fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\n+ fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? 
[0]: \"\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\n+ fsdp_state_dict_type_query,\n+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],\n+ default=FSDP_STATE_DICT_TYPE[0],\n+ )", "from_author": true }, { "body": "It would be more readable to just have a value.", "diff_hunk": "@@ -250,6 +251,15 @@ def get_cluster_input():\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n default=FSDP_BACKWARD_PREFETCH[0],\n )\n+ fsdp_state_dict_type_query = \"What should be your FSDP's state dict type (\"\n+ for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n+ fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\n+ fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? [0]: \"\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\n+ fsdp_state_dict_type_query,\n+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],\n+ default=FSDP_STATE_DICT_TYPE[0],\n+ )", "from_author": false }, { "body": "As mentioned above, we are separately maintaining it in our repo and hence this should not be an issue. ", "diff_hunk": "@@ -472,6 +492,17 @@ def __post_init__(self):\n if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:\n self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)\n \n+ if self.state_dict_type is None:\n+ state_dict_type_policy = os.environ.get(\"FSDP_STATE_DICT_TYPE\", FSDP_STATE_DICT_TYPE[0])", "from_author": true }, { "body": "Changing the launcher is fair game, but existing configs breaking when updating Accelerate is a bit too annoying to the user I think.", "diff_hunk": "@@ -210,12 +211,12 @@ def get_cluster_input():\n for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\n- fsdp_config[\"sharding_strategy\"] = _ask_field(\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(", "from_author": false }, { "body": "As mentioned above, I thought this was fair game as we specified FSDP integration was experimental given that FSDP itself has changed a lot. This was more around readability/grouping for folks to easily understand all the variables necessary for FSDP as there are a lot more now. If you think we can do away with it at the expense of readability/clarity, I will revert the variable name changes.", "diff_hunk": "@@ -141,19 +141,19 @@ def launch_command_parser(subparsers=None):\n help=\"Whether to use fsdp.\",\n )\n parser.add_argument(\n- \"--offload_params\",\n+ \"--fsdp_offload_params\",", "from_author": true }, { "body": "You mean for the `default` or entire query?", "diff_hunk": "@@ -250,6 +251,15 @@ def get_cluster_input():\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n default=FSDP_BACKWARD_PREFETCH[0],\n )\n+ fsdp_state_dict_type_query = \"What should be your FSDP's state dict type (\"\n+ for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n+ fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\n+ fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? 
[0]: \"\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\n+ fsdp_state_dict_type_query,\n+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],\n+ default=FSDP_STATE_DICT_TYPE[0],\n+ )", "from_author": true }, { "body": "But it's not readable.", "diff_hunk": "@@ -472,6 +492,17 @@ def __post_init__(self):\n if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:\n self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)\n \n+ if self.state_dict_type is None:\n+ state_dict_type_policy = os.environ.get(\"FSDP_STATE_DICT_TYPE\", FSDP_STATE_DICT_TYPE[0])", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -472,6 +492,17 @@ def __post_init__(self):\n if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:\n self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)\n \n+ if self.state_dict_type is None:\n+ state_dict_type_policy = os.environ.get(\"FSDP_STATE_DICT_TYPE\", FSDP_STATE_DICT_TYPE[0])", "from_author": true }, { "body": "Done. Added deprecation args with the warnings.", "diff_hunk": "@@ -141,19 +141,19 @@ def launch_command_parser(subparsers=None):\n help=\"Whether to use fsdp.\",\n )\n parser.add_argument(\n- \"--offload_params\",\n+ \"--fsdp_offload_params\",", "from_author": true }, { "body": "Understood. Added code to handle this wherein old configs should work without any issues. ", "diff_hunk": "@@ -210,12 +211,12 @@ def get_cluster_input():\n for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\n- fsdp_config[\"sharding_strategy\"] = _ask_field(\n+ fsdp_config[\"fsdp_sharding_strategy\"] = _ask_field(", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -109,11 +133,21 @@ def save_accelerator_state(\n return output_dir\n \n \n-def load_accelerator_state(input_dir, models, optimizers, schedulers, process_index, scaler=None):\n+def load_accelerator_state(\n+ accelerator,\n+ input_dir: str,\n+ models: list,\n+ optimizers: list,\n+ schedulers: list,\n+ process_index: int,\n+ scaler: GradScaler = None,\n+):", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -68,20 +72,40 @@ def save_accelerator_state(\n An optional gradient scaler instance to save\n \"\"\"\n # Model states\n- for i, state in enumerate(model_states):\n- weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n- output_model_file = os.path.join(output_dir, weights_name)\n- save(state, output_model_file)\n- logger.info(f\"Model weights saved in {output_model_file}\")\n+ for i, model in enumerate(models):\n+ if accelerator.distributed_type == DistributedType.FSDP:\n+ logger.info(\"Saving FSDP model\")\n+ accelerator.state.fsdp_plugin.save_model(accelerator, model, output_dir, i)\n+ logger.info(f\"FSDP Model saved to output dir {output_dir}\")\n+ elif accelerator.distributed_type == DistributedType.DEEPSPEED:\n+ logger.info(\"Saving DeepSpeed Model and Optimizer\")\n+ ckpt_id = f\"{MODEL_NAME}\" if i == 0 else f\"{MODEL_NAME}_{i}\"\n+ model.save_checkpoint(output_dir, ckpt_id)\n+ logger.info(f\"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}\")\n+ else:\n+ state = accelerator.get_state_dict(model, unwrap=False)\n+ weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")", 
"from_author": true }, { "body": "Done.", "diff_hunk": "@@ -43,8 +44,9 @@\n \n \n def save_accelerator_state(\n+ accelerator,\n output_dir: str,\n- model_states: List[dict],\n+ models: list,", "from_author": true }, { "body": "For the default", "diff_hunk": "@@ -250,6 +251,15 @@ def get_cluster_input():\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n default=FSDP_BACKWARD_PREFETCH[0],\n )\n+ fsdp_state_dict_type_query = \"What should be your FSDP's state dict type (\"\n+ for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n+ fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\n+ fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? [0]: \"\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\n+ fsdp_state_dict_type_query,\n+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],\n+ default=FSDP_STATE_DICT_TYPE[0],\n+ )", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -250,6 +251,15 @@ def get_cluster_input():\n lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n default=FSDP_BACKWARD_PREFETCH[0],\n )\n+ fsdp_state_dict_type_query = \"What should be your FSDP's state dict type (\"\n+ for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):\n+ fsdp_state_dict_type_query += f\"[{i}] {state_dict_type}, \"\n+ fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + \")? [0]: \"\n+ fsdp_config[\"fsdp_state_dict_type\"] = _ask_field(\n+ fsdp_state_dict_type_query,\n+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],\n+ default=FSDP_STATE_DICT_TYPE[0],\n+ )", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "`make test_deepspeed` results below (~6 minutes) :\r\n\r\n![Screenshot 2022-07-26 at 1 11 22 PM](https://user-images.githubusercontent.com/13534540/180951182-b4bd178b-2d90-49a6-b6f1-9cfcf3d59ffd.png)\r\n\r\n`make test_fsdp` results below (~4 minutes):\r\n\r\n![Screenshot 2022-07-26 at 1 25 05 PM](https://user-images.githubusercontent.com/13534540/180953795-7e7937fb-723d-4848-82da-051328bb1566.png)\r\n\r\n", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/532", "pr_id": 1000687184 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 5af38afaf..7f36121c1 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -434,22 +434,23 @@ def print(self, *args, **kwargs):\n \n def _prepare_one(self, obj, first_pass=False):\n # First pass of preparation: DataLoader, model, optimizer\n- if isinstance(obj, torch.utils.data.DataLoader) and first_pass:\n- return self.prepare_data_loader(obj)\n- elif isinstance(obj, torch.nn.Module) and first_pass:\n- self._models.append(obj)\n- return self.prepare_model(obj)\n- elif isinstance(obj, torch.optim.Optimizer) and first_pass:\n- optimizer = self.prepare_optimizer(obj)\n- self._optimizers.append(optimizer)\n- return optimizer\n+ if first_pass:\n+ if isinstance(obj, torch.utils.data.DataLoader):\n+ return self.prepare_data_loader(obj)\n+ elif isinstance(obj, torch.nn.Module):\n+ self._models.append(obj)\n+ return self.prepare_model(obj)\n+ elif isinstance(obj, torch.optim.Optimizer):\n+ optimizer = self.prepare_optimizer(obj)\n+ self._optimizers.append(optimizer)\n+ return optimizer\n # Second pass of preparation: LR scheduler (which need the full list of optimizers)\n- elif isinstance(obj, torch.optim.lr_scheduler._LRScheduler) and not first_pass:\n+ elif isinstance(obj, torch.optim.lr_scheduler._LRScheduler):\n scheduler = 
self.prepare_scheduler(obj)\n self._schedulers.append(scheduler)\n return scheduler\n- else:\n- return obj\n+ # Return the unprocessed object if previous criteria was not met\n+ return obj\n \n def _prepare_fsdp(self, *args):\n result = []\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/531", "pr_id": 999551460 }, { "diff": "diff --git a/docs/source/big_modeling.mdx b/docs/source/big_modeling.mdx\nindex a5cb3d4ed..051d1bda7 100644\n--- a/docs/source/big_modeling.mdx\n+++ b/docs/source/big_modeling.mdx\n@@ -203,6 +203,65 @@ This only supports inference of your model, not training. Most of the computatio\n \n </Tip>\n \n+## Designing a device map\n+\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+\n+<Tip>\n+\n+You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n+\n+</Tip>\n+\n+First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `\"cpu\"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `\"10GiB\"` or `\"10GB\"`.\n+\n+Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM for the model weights:\n+\n+```python\n+from accelerate import infer_auto_device_map\n+\n+device_map = infer_auto_device_map(my_model, max_memory={0: \"10GiB\", 1: \"10GiB\", \"cpu\": \"30GiB\"})\n+```\n+\n+<Tip warning={true}>\n+\n+When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used do `torch.ones(1).cuda()` and look at the memory usage.\n+\n+Therefore when you create memory maps with `max_memory` make sure to adjust the avaialble memory accordingly to avoid out-of-memory errors.\n+\n+</Tip>\n+\n+Additionally, if you do some additional operations with your outputs without placing them back on the CPU (for instance inside the `generate` method of Transformers) and if you placed your inputs on a GPU, that GPU will consume more memory than the others (Accelerate always place the output back to the device of the input). Therefore if you would like to optimize the maximum batch size and you have many GPUs, give the first GPU less memory. For example, with BLOOM-176B on 8x80 A100 setup the close to ideal map is:\n+\n+```python\n+max_memory = {0: \"30GIB\", 1: \"46GIB\", 2: \"46GIB\", 3: \"46GIB\", 4: \"46GIB\", 5: \"46GIB\", 6: \"46GIB\", 7: \"46GIB\"}\n+```\n+as you can see we gave the remaining 7 GPUs ~50% more memory than GPU 0.\n+\n+If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs) or `\"cpu\"` for CPU offload, `\"disk\"` for disk offload. 
The keys need to cover the whole model, you can then define your device map as you wish: for instance if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2\": 1}\n+```\n+\n+another one that is valid could be:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2.linear1\": 0, \"block2.linear2\": 1, \"block2.linear3\": 1}\n+```\n+\n+On the other hand, this one is not valid as it does not cover every parameter of the model:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2.linear1\": 1, \"block2.linear2\": 1}\n+```\n+\n+<Tip>\n+\n+To be the most efficient, make sure your device map puts the parameters on the GPUs in a sequential manner (e.g. don't put one of the first weights on GPU 0, then weights on GPU 1 and the last weight back to GPU 0) to avoid making many transfers of data between the GPUs.\n+\n+</Tip>\n+\n ## Limits and further development\n \n We are aware of the current limitations in the API:\n", "code_comments": [ { "body": "I think including the ticks here may have broken the right navbar in the docs?", "diff_hunk": "@@ -203,6 +203,50 @@ This only supports inference of your model, not training. Most of the computatio\n \n </Tip>\n \n+## Designing a `device_map`", "from_author": false }, { "body": "Arg!", "diff_hunk": "@@ -203,6 +203,50 @@ This only supports inference of your model, not training. Most of the computatio\n \n </Tip>\n \n+## Designing a `device_map`", "from_author": true }, { "body": "I think here you may want to say \"want to use more than ... for model weights\". The addition is to be specific that it's just \"model weights\". I don't think accelerate controls activation and temp memory, does it?\r\n\r\nSo should it say - don't forget to leave memory for activations?\r\n\r\nWe really need to try to come up with the math that converts batch size into how much memory it'd need.\r\n\r\nOne way is to do it empirically by simply measuring with a given bs and reporting back to user a breakdown - this is how much was used for weights and how much for the activations/temps. But may not succeed on OOM.", "diff_hunk": "@@ -203,6 +203,50 @@ This only supports inference of your model, not training. Most of the computatio\n \n </Tip>\n \n+## Designing a device map\n+\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+\n+<Tip>\n+\n+You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n+\n+</Tip>\n+\n+First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `\"cpu\"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `\"10GiB\"` or `\"10GB\"`.\n+\n+Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM:", "from_author": false }, { "body": "```suggestion\r\nWhen a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. 
Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used do `torch.ones(1).cuda()` and look at the memory usage.\r\n\r\nTherefore when you create memory maps with `max_memory` make sure to adjust the avaialble memory accordingly to avoid out-of-memory errors.\r\n```\r\n\r\nif you're open to a more detailed explanation of what's going on and how to measure it.", "diff_hunk": "@@ -203,6 +203,50 @@ This only supports inference of your model, not training. Most of the computatio\n \n </Tip>\n \n+## Designing a device map\n+\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+\n+<Tip>\n+\n+You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n+\n+</Tip>\n+\n+First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `\"cpu\"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `\"10GiB\"` or `\"10GB\"`.\n+\n+Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM:\n+\n+```python\n+from accelerate import infer_auto_device_map\n+\n+device_map = infer_auto_device_map(my_model, max_memory={0: \"10GiB\", 1: \"10GiB\", \"cpu\": \"30GiB\"})\n+```\n+\n+<Tip warning={true}>\n+\n+PyTorch takes some space on a GPU, which can be up to 2GB. You should account for this when passing along `max_memory` to avoid out-of-memory errors.", "from_author": false }, { "body": "Should it recommend that one puts the params in a sequential order and not manually organize them so that they fit better? e.g. putting a huge 176 embedding somewhere with a few tiny weights would balance things nicer, but it'd be detrimental to performance.", "diff_hunk": "@@ -203,6 +203,50 @@ This only supports inference of your model, not training. Most of the computatio\n \n </Tip>\n \n+## Designing a device map\n+\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+\n+<Tip>\n+\n+You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n+\n+</Tip>\n+\n+First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `\"cpu\"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `\"10GiB\"` or `\"10GB\"`.\n+\n+Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM:\n+\n+```python\n+from accelerate import infer_auto_device_map\n+\n+device_map = infer_auto_device_map(my_model, max_memory={0: \"10GiB\", 1: \"10GiB\", \"cpu\": \"30GiB\"})\n+```\n+\n+<Tip warning={true}>\n+\n+PyTorch takes some space on a GPU, which can be up to 2GB. 
You should account for this when passing along `max_memory` to avoid out-of-memory errors.\n+\n+</Tip>\n+\n+If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs) or `\"cpu\"` for CPU offload, `\"disk\"` for disk offload. The keys need to cover the whole model, you can then define your device map as you wish: for instance if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2\": 1}\n+```\n+\n+another one that is valid could be:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2.linear1\": 0, \"block2.linear2\": 1, \"block2.linear3\": 1}\n+```\n+\n+On the other hand, this one is not valid as it does not cover every parameter of the model:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2.linear1\": 1, \"block2.linear2\": 1}\n+```\n+", "from_author": false }, { "body": "````suggestion\r\n\r\nAdditionally, it's important to know that the first GPU consumes more memory than the rest of the GPUs for managing activations, therefore if you would like to optimize the maximum batch size and you have many GPUs. Give the first GPU less memory. For example, with BLOOM-176B on 8x80 A100 setup the close to ideal map is:\r\n```\r\n return {0: '30GIB', 1: '46GIB', 2: '46GIB', 3: '46GIB', 4: '46GIB', 5: '46GIB', 6: '46GIB', 7: '46GIB'}\r\n```\r\nas you can see we gave the remaining 7 GPUs ~50% more memory than GPU 0.\r\n````", "diff_hunk": "@@ -203,6 +203,50 @@ This only supports inference of your model, not training. Most of the computatio\n \n </Tip>\n \n+## Designing a device map\n+\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+\n+<Tip>\n+\n+You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n+\n+</Tip>\n+\n+First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `\"cpu\"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `\"10GiB\"` or `\"10GB\"`.\n+\n+Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM:\n+\n+```python\n+from accelerate import infer_auto_device_map\n+\n+device_map = infer_auto_device_map(my_model, max_memory={0: \"10GiB\", 1: \"10GiB\", \"cpu\": \"30GiB\"})\n+```\n+\n+<Tip warning={true}>\n+\n+PyTorch takes some space on a GPU, which can be up to 2GB. You should account for this when passing along `max_memory` to avoid out-of-memory errors.\n+\n+</Tip>\n+", "from_author": false }, { "body": "This part is only for `generate`. Will rephrase a bit after accepting your suggestion.", "diff_hunk": "@@ -203,6 +203,50 @@ This only supports inference of your model, not training. 
Most of the computatio\n \n </Tip>\n \n+## Designing a device map\n+\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+\n+<Tip>\n+\n+You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n+\n+</Tip>\n+\n+First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `\"cpu\"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `\"10GiB\"` or `\"10GB\"`.\n+\n+Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM:\n+\n+```python\n+from accelerate import infer_auto_device_map\n+\n+device_map = infer_auto_device_map(my_model, max_memory={0: \"10GiB\", 1: \"10GiB\", \"cpu\": \"30GiB\"})\n+```\n+\n+<Tip warning={true}>\n+\n+PyTorch takes some space on a GPU, which can be up to 2GB. You should account for this when passing along `max_memory` to avoid out-of-memory errors.\n+\n+</Tip>\n+", "from_author": true }, { "body": "Good point!", "diff_hunk": "@@ -203,6 +203,50 @@ This only supports inference of your model, not training. Most of the computatio\n \n </Tip>\n \n+## Designing a device map\n+\n+You can let πŸ€— Accelerate handle the device map computation by setting `device_map=\"auto\"` or create one yourself, if you want more control over where each layer should go.\n+\n+<Tip>\n+\n+You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n+\n+</Tip>\n+\n+First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `\"cpu\"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `\"10GiB\"` or `\"10GB\"`.\n+\n+Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM:\n+\n+```python\n+from accelerate import infer_auto_device_map\n+\n+device_map = infer_auto_device_map(my_model, max_memory={0: \"10GiB\", 1: \"10GiB\", \"cpu\": \"30GiB\"})\n+```\n+\n+<Tip warning={true}>\n+\n+PyTorch takes some space on a GPU, which can be up to 2GB. You should account for this when passing along `max_memory` to avoid out-of-memory errors.\n+\n+</Tip>\n+\n+If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs) or `\"cpu\"` for CPU offload, `\"disk\"` for disk offload. 
The keys need to cover the whole model, you can then define your device map as you wish: for instance if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2\": 1}\n+```\n+\n+another one that is valid could be:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2.linear1\": 0, \"block2.linear2\": 1, \"block2.linear3\": 1}\n+```\n+\n+On the other hand, this one is not valid as it does not cover every parameter of the model:\n+\n+```python\n+device_map = {\"block1\": 0, \"block2.linear1\": 1, \"block2.linear2\": 1}\n+```\n+", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Should have addressed all your comments @stas00, let me know if all looks right to you and I'll merge :-)", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/530", "pr_id": 999551216 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 85b1f3237..907dfdac4 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -260,7 +260,7 @@ def load_checkpoint_and_dispatch(\n offload_folder: Optional[Union[str, os.PathLike]] = None,\n offload_buffers: bool = False,\n dtype: Optional[Union[str, torch.dtype]] = None,\n- offload_state_dict: bool = False,\n+ offload_state_dict: Optional[bool] = None,\n preload_module_classes: Optional[List[str]] = None,\n ):\n \"\"\"\n@@ -292,9 +292,10 @@ def load_checkpoint_and_dispatch(\n well as the parameters.\n dtype (`str` or `torch.dtype`, *optional*):\n If provided, the weights will be converted to that type when loaded.\n- offload_state_dict (`bool`, *optional*, defaults to `False`):\n+ offload_state_dict (`bool`, *optional*):\n If `True`, will temporarily offload the CPU state dict on the hard drive to avoig getting out of CPU RAM if\n- the weight of the CPU state dict + the biggest shard does not fit.\n+ the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map\n+ picked contains `\"disk\"` values.\n preload_module_classes (`List[str]`, *optional*):\n A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n of the forward. 
This should only be used for classes that have submodules which are registered but not\n@@ -305,6 +306,8 @@ def load_checkpoint_and_dispatch(\n device_map = infer_auto_device_map(\n model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype\n )\n+ if offload_state_dict is None and \"disk\" in device_map.values():\n+ offload_state_dict = True\n load_checkpoint_in_model(\n model,\n checkpoint,\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/529", "pr_id": 999415644 }, { "diff": "diff --git a/src/accelerate/commands/test.py b/src/accelerate/commands/test.py\nindex 379233679..41da75596 100644\n--- a/src/accelerate/commands/test.py\n+++ b/src/accelerate/commands/test.py\n@@ -43,7 +43,7 @@ def test_command_parser(subparsers=None):\n \n \n def test_command(args):\n- script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + [\"test_utils\", \"test_script.py\"])\n+ script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + [\"test_utils\", \"scripts\", \"test_script.py\"])\n \n test_args = f\"\"\"\n --config_file={args.config_file} {script_name}\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/528", "pr_id": 999349422 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 9b6e7f069..e781a5cea 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -272,7 +272,7 @@ def __init__(\n raise ValueError(err.format(mode=\"bf16\", requirement=\"PyTorch >= 1.10 and a supported device.\"))\n \n # Only on the GPU do we care about scaling the gradients\n- if torch.cuda.is_available():\n+ if torch.cuda.is_available() and self.device.type != \"cpu\":\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex cffe9990b..bda9e6b38 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -256,7 +256,14 @@ def simple_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- current_env[\"USE_CPU\"] = str(args.cpu)\n+ current_env[\"USE_CPU\"] = str(args.cpu or args.use_cpu)\n+ if args.num_machines > 1:\n+ current_env[\"MASTER_ADDR\"] = args.main_process_ip\n+ current_env[\"MASTER_PORT\"] = str(args.main_process_port)\n+ elif args.num_processes > 1:\n+ current_env[\"MASTER_ADDR\"] = args.main_process_ip if args.main_process_ip is not None else \"127.0.0.1\"\n+ current_env[\"MASTER_PORT\"] = str(args.main_process_port) if args.main_process_port is not None else \"29500\"\n+\n try:\n mixed_precision = PrecisionType(args.mixed_precision.lower())\n except ValueError:\n@@ -269,6 +276,7 @@ def simple_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n \n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n", "code_comments": [ { "body": "Should this be under and `if args.num_machines > 1`? 
It seems related to multinode training.", "diff_hunk": "@@ -257,6 +257,8 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n+ current_env[\"MASTER_ADDR\"] = args.main_process_ip\n+ current_env[\"MASTER_PORT\"] = str(args.main_process_port)", "from_author": false }, { "body": "thanks for the suggestion, a patch is uploaded to fix it", "diff_hunk": "@@ -257,6 +257,8 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n+ current_env[\"MASTER_ADDR\"] = args.main_process_ip\n+ current_env[\"MASTER_PORT\"] = str(args.main_process_port)", "from_author": true }, { "body": "If they are needed, I would rather use the ones set par the user than hardcoding a default, which may create hard-to-debug issues for users.", "diff_hunk": "@@ -257,6 +257,12 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n+ if args.num_machines > 1:\n+ current_env[\"MASTER_ADDR\"] = args.main_process_ip\n+ current_env[\"MASTER_PORT\"] = str(args.main_process_port)\n+ elif args.num_processes > 1:\n+ current_env[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ current_env[\"MASTER_PORT\"] = \"29500\"", "from_author": false }, { "body": "hi, sgugger, now in the accelerate config, if num_machines == 1, the master ip and port is not set. and if num_process > 1 and DDP is used in one machine, we still need the master ip and port", "diff_hunk": "@@ -257,6 +257,12 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n+ if args.num_machines > 1:\n+ current_env[\"MASTER_ADDR\"] = args.main_process_ip\n+ current_env[\"MASTER_PORT\"] = str(args.main_process_port)\n+ elif args.num_processes > 1:\n+ current_env[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ current_env[\"MASTER_PORT\"] = \"29500\"", "from_author": true }, { "body": "But they could be set in the args passed, so maybe only use this default if the args is empty/None?", "diff_hunk": "@@ -257,6 +257,12 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n+ if args.num_machines > 1:\n+ current_env[\"MASTER_ADDR\"] = args.main_process_ip\n+ current_env[\"MASTER_PORT\"] = str(args.main_process_port)\n+ elif args.num_processes > 1:\n+ current_env[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ current_env[\"MASTER_PORT\"] = \"29500\"", "from_author": false }, { "body": "done, thanks for the suggestion", "diff_hunk": "@@ -257,6 +257,12 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n+ if args.num_machines > 1:\n+ current_env[\"MASTER_ADDR\"] = args.main_process_ip\n+ current_env[\"MASTER_PORT\"] = str(args.main_process_port)\n+ elif args.num_processes > 1:\n+ current_env[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ current_env[\"MASTER_PORT\"] = \"29500\"", "from_author": true } ], "context": [ { "body": "@sgugger @yao-matrix please help review", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thansk again for your contribution!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/527", "pr_id": 998991162 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 2dec26a98..9b6e7f069 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -161,7 +161,10 @@ def __init__(\n 
kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n self.logging_dir = logging_dir\n- self.log_with = filter_trackers(log_with, self.logging_dir)\n+ trackers = filter_trackers(log_with, self.logging_dir)\n+ if len(trackers) < 1 and log_with is not None:\n+ warnings.warn(f\"`log_with={log_with}` was passed but no supported trackers are currently installed.\")\n+ self.log_with = trackers\n \n if mixed_precision is not None:\n mixed_precision = str(mixed_precision)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/524", "pr_id": 997698799 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 2dec26a98..596998f1e 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -235,6 +235,13 @@ def __init__(\n **kwargs,\n )\n \n+ if (\n+ (mixed_precision != \"bf16\")\n+ and getattr(self.state, \"downcast_bfloat\", False)\n+ and (self.state.distributedType != DistributedType.TPU)\n+ ):\n+ raise ValueError(\"Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU\")\n+\n if gradient_accumulation_steps > 1:\n if self.state.distributed_type == DistributedType.TPU:\n raise NotImplementedError(\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 0051d899a..8a4f38f9e 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -260,11 +260,18 @@ def get_cluster_input():\n else:\n mixed_precision = \"no\"\n \n+ downcast_bf16 = \"no\"\n+ if distributed_type == DistributedType.TPU and mixed_precision == \"bf16\":\n+ downcast_bf16 = _ask_field(\n+ \"Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?\", default=\"no\"\n+ )\n+\n return ClusterConfig(\n compute_environment=ComputeEnvironment.LOCAL_MACHINE,\n distributed_type=distributed_type,\n num_processes=num_processes,\n mixed_precision=mixed_precision,\n+ downcast_bf16=downcast_bf16,\n machine_rank=machine_rank,\n num_machines=num_machines,\n main_process_ip=main_process_ip,\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 1ef68200b..11ca0ca90 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -143,6 +143,8 @@ class ClusterConfig(BaseConfig):\n deepspeed_config: dict = None\n # args for fsdp\n fsdp_config: dict = None\n+ # args for TPU\n+ downcast_bf16: bool = False\n \n def __post_init__(self):\n if self.deepspeed_config is None:\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex cffe9990b..75c6c7b1c 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -34,6 +34,7 @@\n get_launch_prefix,\n is_deepspeed_available,\n is_sagemaker_available,\n+ patch_environment,\n )\n from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\n from accelerate.utils.dataclasses import SageMakerDistributedType\n@@ -198,6 +199,11 @@ def launch_command_parser(subparsers=None):\n default=None,\n help=\"The name of the main function to be executed in your script (only for TPU training).\",\n )\n+ parser.add_argument(\n+ \"--downcast_bf16\",\n+ action=\"store_true\",\n+ help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to 
bfloat16 or if double tensors remain as float32\",\n+ )\n parser.add_argument(\n \"-m\",\n \"--module\",\n@@ -425,9 +431,19 @@ def deepspeed_launcher(args):\n def tpu_launcher(args):\n import torch_xla.distributed.xla_multiprocessing as xmp\n \n+ current_env = {}\n+\n if args.no_python:\n raise ValueError(\"--no_python cannot be used with TPU launcher\")\n \n+ if args.mixed_precision == \"bf16\":\n+ if args.downcast_bf16:\n+ current_env[\"XLA_USE_BF16\"] = \"0\"\n+ current_env[\"XLA_DOWNCAST_BF16\"] = \"1\"\n+ else:\n+ current_env[\"XLA_USE_BF16\"] = \"1\"\n+ current_env[\"XLA_DOWNCAST_BF16\"] = \"0\"\n+\n if args.module:\n mod_name = args.training_script\n else:\n@@ -447,7 +463,8 @@ def tpu_launcher(args):\n sys.argv = [mod.__file__] + args.training_script_args\n \n main_function = getattr(mod, args.main_training_function)\n- xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)\n+ with patch_environment(**current_env):\n+ xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)\n \n \n def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 4482341f3..3fcf7d2e2 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -115,7 +115,14 @@ def __init__(\n self.local_process_index = xm.get_local_ordinal()\n self.device = xm.xla_device()\n if mixed_precision == \"bf16\":\n- os.environ[\"XLA_USE_BF16\"] = str(1)\n+ if os.environ.get(\"DOWNCAST_BF16\"):\n+ os.environ[\"XLA_USE_BF16\"] = str(0)\n+ os.environ[\"XLA_DOWNCAST_BF16\"] = str(1)\n+ self.downcast_bfloat = True\n+ else:\n+ os.environ[\"XLA_USE_BF16\"] = str(1)\n+ os.environ[\"XLA_DOWNCAST_BF16\"] = str(0)\n+ self.downcast_bfloat = False\n self.mixed_precision = mixed_precision\n elif os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\n assert (\n", "code_comments": [ { "body": "Very much against adding a new Accelerator parameter for such a niche feature. 
It should be in the config only.", "diff_hunk": "@@ -126,6 +126,9 @@ class Accelerator:\n accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\n A path to a directory for storing logs of locally-compatible loggers.\n+ downcast_bf16 (`bool`, *optional*):\n+ If set to `True` and using `mixed_precision=\"bf16\"` then `torch.float` will become `bfloat16` and\n+ `torch.double` will remain `float32` on TPUs.", "from_author": false }, { "body": "Doesn't seem to be able to be None, so I'm confused about this change in typing.", "diff_hunk": "@@ -156,8 +159,9 @@ def __init__(\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = None,\n+ downcast_bf16: Optional[bool] = False,\n dispatch_batches: Optional[bool] = None,\n- step_scheduler_with_optimizer: bool = True,\n+ step_scheduler_with_optimizer: Optional[bool] = True,", "from_author": false }, { "body": "Okay, will adjust on Monday!\r\n\r\n(This should be in the release after this next one anyways)", "diff_hunk": "@@ -126,6 +126,9 @@ class Accelerator:\n accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\n A path to a directory for storing logs of locally-compatible loggers.\n+ downcast_bf16 (`bool`, *optional*):\n+ If set to `True` and using `mixed_precision=\"bf16\"` then `torch.float` will become `bfloat16` and\n+ `torch.double` will remain `float32` on TPUs.", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/523", "pr_id": 997574255 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex eedbf3392..8da906ee9 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -162,6 +162,7 @@ def collate_fn(examples):\n # New Code #\n # For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer\n model = accelerator.prepare(model)\n+ accelerator.print(model)\n \n # Instantiate optimizer\n # New Code #\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 2dec26a98..c3182e441 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -262,8 +262,13 @@ def __init__(\n if not torch.cuda.is_available():\n raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n- self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n- elif self.state.mixed_precision == \"bf16\":\n+ if self.distributed_type == DistributedType.FSDP:\n+ from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n+\n+ self.scaler = ShardedGradScaler(**kwargs)\n+ else:\n+ self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n+ elif self.state.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.FSDP:\n self.native_amp = is_bf16_available(True)\n if mixed_precision == \"bf16\" and not self.native_amp and not is_tpu_available():\n raise ValueError(err.format(mode=\"bf16\", requirement=\"PyTorch >= 1.10 and a supported device.\"))\n@@ -566,6 
+571,7 @@ def prepare_model(self, model):\n # Check if the model is already a FSDP model due to `Manual Wrapping` and if so,\n # don't wrap it again\n if type(model) != FSDP:\n+ self.state.fsdp_plugin.set_auto_wrap_policy(model)\n fsdp_plugin = self.state.fsdp_plugin\n model = FSDP(\n model,\n@@ -573,6 +579,7 @@ def prepare_model(self, model):\n cpu_offload=fsdp_plugin.cpu_offload,\n auto_wrap_policy=fsdp_plugin.auto_wrap_policy,\n backward_prefetch=fsdp_plugin.backward_prefetch,\n+ mixed_precision=fsdp_plugin.mixed_precision_policy,\n ignored_modules=fsdp_plugin.ignored_modules,\n )\n if not fsdp_plugin.cpu_offload.offload_params:\n@@ -817,6 +824,7 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n Should be used in place of `torch.nn.utils.clip_grad_norm_`.\n \"\"\"\n if self.distributed_type == DistributedType.FSDP:\n+ self.unscale_gradients()\n parameters = [p for p in parameters]\n for model in self._models:\n if parameters == [p for p in model.parameters()]:\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 0051d899a..d9dd80094 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -15,7 +15,12 @@\n # limitations under the License.\n \n from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_transformers_available\n-from ...utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\n+from ...utils.constants import (\n+ DEEPSPEED_MULTINODE_LAUNCHERS,\n+ FSDP_AUTO_WRAP_POLICY,\n+ FSDP_BACKWARD_PREFETCH,\n+ FSDP_SHARDING_STRATEGY,\n+)\n from .config_args import ClusterConfig\n from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n \n@@ -201,8 +206,12 @@ def get_cluster_input():\n if use_fsdp:\n distributed_type = DistributedType.FSDP\n if distributed_type == DistributedType.FSDP:\n+ sharding_strategy_query = \"What should be your sharding strategy (\"\n+ for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):\n+ sharding_strategy_query += f\"[{i+1}] {strategy}, \"\n+ sharding_strategy_query = sharding_strategy_query[:-2] + \")? [1]: \"\n fsdp_config[\"sharding_strategy\"] = _ask_field(\n- \"What should be your sharding strategy ([1] FULL_SHARD, [2] SHARD_GRAD_OP)? [1]: \",\n+ sharding_strategy_query,\n lambda x: int(x),\n default=1,\n )\n@@ -212,10 +221,34 @@ def get_cluster_input():\n default=False,\n error_message=\"Please enter yes or no.\",\n )\n- fsdp_config[\"min_num_params\"] = _ask_field(\n- \"What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: \",\n- lambda x: int(x),\n- default=1e8,\n+ fsdp_wrap_query = \"What should be your auto wrap policy (\"\n+ for i, wrap_policy in enumerate(FSDP_AUTO_WRAP_POLICY):\n+ fsdp_wrap_query += f\"[{i}] {wrap_policy}, \"\n+ fsdp_wrap_query = fsdp_wrap_query[:-2] + \")? [0]: \"\n+ fsdp_config[\"fsdp_auto_wrap_policy\"] = _ask_field(\n+ fsdp_wrap_query,\n+ lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],\n+ default=FSDP_AUTO_WRAP_POLICY[0],\n+ )\n+ if fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[0]:\n+ fsdp_config[\"transformer_layer_cls_to_wrap\"] = _ask_field(\n+ \"What is the transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...? 
: \",\n+ lambda x: str(x),\n+ )\n+ elif fsdp_config[\"fsdp_auto_wrap_policy\"] == FSDP_AUTO_WRAP_POLICY[1]:\n+ fsdp_config[\"min_num_params\"] = _ask_field(\n+ \"What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: \",\n+ lambda x: int(x),\n+ default=1e8,\n+ )\n+ fsdp_backward_prefetch_query = \"What should be your FSDP's backward prefetch policy (\"\n+ for i, backward_prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):\n+ fsdp_backward_prefetch_query += f\"[{i}] {backward_prefetch_policy}, \"\n+ fsdp_backward_prefetch_query = fsdp_backward_prefetch_query[:-2] + \")? [0]: \"\n+ fsdp_config[\"fsdp_backward_prefetch_policy\"] = _ask_field(\n+ fsdp_backward_prefetch_query,\n+ lambda x: FSDP_BACKWARD_PREFETCH[int(x)],\n+ default=FSDP_BACKWARD_PREFETCH[0],\n )\n \n if distributed_type == DistributedType.TPU:\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex cffe9990b..5bfd9894d 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -158,6 +158,25 @@ def launch_command_parser(subparsers=None):\n default=1,\n help=\"FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).\",\n )\n+ parser.add_argument(\n+ \"--fsdp_auto_wrap_policy\",\n+ type=str,\n+ default=None,\n+ help=\"FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--transformer_layer_cls_to_wrap\",\n+ default=None,\n+ type=str,\n+ help=\"Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... \"\n+ \"(useful only when `use_fsdp` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--fsdp_backward_prefetch_policy\",\n+ default=None,\n+ type=str,\n+ help=\"FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).\",\n+ )\n parser.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n@@ -322,9 +341,12 @@ def multi_gpu_launcher(args):\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n if args.use_fsdp:\n current_env[\"USE_FSDP\"] = \"true\"\n+ current_env[\"FSDP_AUTO_WRAP_POLICY\"] = str(args.fsdp_auto_wrap_policy)\n+ current_env[\"FSDP_TRANSFORMER_CLS_TO_WRAP\"] = str(args.transformer_layer_cls_to_wrap)\n current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.offload_params).lower()\n current_env[\"FSDP_MIN_NUM_PARAMS\"] = str(args.min_num_params)\n current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.sharding_strategy)\n+ current_env[\"FSDP_BACKWARD_PREFETCH\"] = str(args.fsdp_backward_prefetch_policy)\n current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 4482341f3..38cd40bc7 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -146,9 +146,7 @@ def __init__(\n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n self.distributed_type = DistributedType.FSDP\n if self.mixed_precision != \"no\":\n- raise ValueError(\n- \"Mixed precision is currently not supported for FSDP. 
Please set `mixed_precision` to `no`.\"\n- )\n+ fsdp_plugin.set_mixed_precision(self.mixed_precision)\n self.fsdp_plugin = fsdp_plugin\n elif get_int_from_env([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"], 1) > 1:\n self.distributed_type = DistributedType.MULTI_CPU\ndiff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\nindex 5e31a96c9..00556cad6 100644\n--- a/src/accelerate/utils/constants.py\n+++ b/src/accelerate/utils/constants.py\n@@ -24,6 +24,9 @@\n SAGEMAKER_PYTHON_VERSION = \"py38\"\n SAGEMAKER_TRANSFORMERS_VERSION = \"4.17.0\"\n SAGEMAKER_PARALLEL_EC2_INSTANCES = [\"ml.p3.16xlarge\", \"ml.p3dn.24xlarge\", \"ml.p4dn.24xlarge\"]\n+FSDP_SHARDING_STRATEGY = [\"FULL_SHARD\", \"SHARD_GRAD_OP\", \"NO_SHARD\"]\n+FSDP_AUTO_WRAP_POLICY = [\"TRANSFORMER_BASED_WRAP\", \"SIZE_BASED_WRAP\", \"NO_WRAP\"]\n+FSDP_BACKWARD_PREFETCH = [\"BACKWARD_PRE\", \"BACKWARD_POST\", \"NO_PREFETCH\"]\n DEEPSPEED_MULTINODE_LAUNCHERS = [\"pdsh\", \"standard\", \"openmpi\", \"mvapich\"]\n \n STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 28fc3826d..8c8bdf106 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -28,6 +28,8 @@\n \n import torch\n \n+from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH\n+\n \n class KwargsHandler:\n \"\"\"\n@@ -418,22 +420,35 @@ class FullyShardedDataParallelPlugin:\n \n sharding_strategy: \"typing.Any\" = field(\n default=None,\n- metadata={\"help\": \"Possible options are [1] FULL_SHARD, [2] SHARD_GRAD_OP\"},\n+ metadata={\n+ \"help\": \"FSDP Sharding Strategy of type `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`\"\n+ },\n )\n backward_prefetch: \"typing.Any\" = field(\n default=None,\n- metadata={\"help\": \"Possible options are [1] BACKWARD_PRE, [2] BACKWARD_POST\"},\n+ metadata={\n+ \"help\": \"FSDP Backward Prefetch of type `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`\"\n+ },\n )\n- auto_wrap_policy: \"typing.Any\" = field(\n+ mixed_precision_policy: \"typing.Any\" = field(\n default=None,\n- metadata={\"help\": \"A callable specifying a policy to recursively wrap layers with FSDP\"},\n+ metadata={\n+ \"help\": \"A config to enable mixed precision training with FullyShardedDataParallel. \"\n+ \"The 3 flags that are set are `param_dtype`, `reduce_dtype`, `buffer_dtype`. \"\n+ \"Each flag expects `torch.dtype` as the value. \"\n+ \"It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision`.\"\n+ },\n )\n- cpu_offload: Optional[Callable] = field(\n+ auto_wrap_policy: Optional[Callable] = field(\n default=None,\n- metadata={\"help\": \"Decides Whether to offload parameters and gradients to CPU.\"},\n+ metadata={\"help\": \"A callable specifying a policy to recursively wrap layers with FSDP\"},\n )\n- min_num_params: int = field(\n- default=None, metadata={\"help\": \"FSDP's minimum number of parameters for Default Auto Wrapping.\"}\n+ cpu_offload: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Decides Whether to offload parameters and gradients to CPU. 
\"\n+ \"It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`.\"\n+ },\n )\n ignored_modules: Optional[Iterable[torch.nn.Module]] = field(\n default=None,\n@@ -441,8 +456,7 @@ class FullyShardedDataParallelPlugin:\n )\n \n def __post_init__(self):\n- from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, ShardingStrategy\n- from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy\n \n if self.sharding_strategy is None:\n self.sharding_strategy = ShardingStrategy(int(os.environ.get(\"FSDP_SHARDING_STRATEGY\", 1)))\n@@ -453,11 +467,63 @@ def __post_init__(self):\n else:\n self.cpu_offload = CPUOffload(offload_params=False)\n \n- if self.min_num_params is None:\n- self.min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n+ if self.backward_prefetch is None:\n+ prefetch_policy = os.environ.get(\"FSDP_BACKWARD_PREFETCH\", FSDP_BACKWARD_PREFETCH[-1])\n+ if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:\n+ self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)\n+\n+ @staticmethod\n+ def get_module_class_from_name(module, name):\n+ \"\"\"\n+ Gets a class from a module by its name.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to get the class from.\n+ name (`str`): The name of the class.\n+ \"\"\"\n+ modules_children = list(module.children())\n+ if module.__class__.__name__ == name:\n+ return module.__class__\n+ elif len(modules_children) == 0:\n+ return\n+ else:\n+ for child_module in modules_children:\n+ module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)\n+ if module_class is not None:\n+ return module_class\n+\n+ def set_auto_wrap_policy(self, model):\n+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy\n \n if self.auto_wrap_policy is None:\n- if self.min_num_params > 0:\n+ auto_wrap_policy = os.environ.get(\"FSDP_AUTO_WRAP_POLICY\", FSDP_AUTO_WRAP_POLICY[-1])\n+ if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:\n+ transformer_cls_to_wrap = os.environ.get(\"FSDP_TRANSFORMER_CLS_TO_WRAP\", \"\")\n+ transformer_cls_to_wrap = FullyShardedDataParallelPlugin.get_module_class_from_name(\n+ model, transformer_cls_to_wrap\n+ )\n+ if transformer_cls_to_wrap is None:\n+ raise Exception(\"Could not find the transformer layer class to wrap in the model.\")\n self.auto_wrap_policy = functools.partial(\n- size_based_auto_wrap_policy, min_num_params=self.min_num_params\n+ transformer_auto_wrap_policy,\n+ # Transformer layer class to wrap\n+ transformer_layer_cls={transformer_cls_to_wrap},\n )\n+ elif auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[1]:\n+ min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n+ if min_num_params > 0:\n+ self.auto_wrap_policy = functools.partial(\n+ size_based_auto_wrap_policy, min_num_params=min_num_params\n+ )\n+\n+ def set_mixed_precision(self, mixed_precision):\n+ if mixed_precision == \"fp16\":\n+ dtype = torch.float16\n+ elif mixed_precision == \"bf16\":\n+ dtype = torch.bfloat16\n+ else:\n+ raise ValueError(f\"Unknown mixed precision value: {mixed_precision}\")\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision\n+\n+ if self.mixed_precision_policy is None:\n+ self.mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)\ndiff --git a/src/accelerate/utils/operations.py 
b/src/accelerate/utils/operations.py\nindex 7505c04ce..7b3c8de5a 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -217,7 +217,11 @@ def gather(tensor):\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n- elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n+ elif AcceleratorState().distributed_type in [\n+ DistributedType.DEEPSPEED,\n+ DistributedType.MULTI_GPU,\n+ DistributedType.FSDP,\n+ ]:\n return _gpu_gather(tensor)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _cpu_gather(tensor)\n", "code_comments": [ { "body": "Is this supposed to stay here?", "diff_hunk": "@@ -162,6 +162,7 @@ def collate_fn(examples):\n # New Code #\n # For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer\n model = accelerator.prepare(model)\n+ accelerator.print(model)", "from_author": false }, { "body": "Just to be sure, this is in 1.12?", "diff_hunk": "@@ -146,9 +146,7 @@ def __init__(\n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n self.distributed_type = DistributedType.FSDP\n if self.mixed_precision != \"no\":\n- raise ValueError(\n- \"Mixed precision is currently not supported for FSDP. Please set `mixed_precision` to `no`.\"\n- )\n+ fsdp_plugin.set_mixed_precision(self.mixed_precision)", "from_author": false }, { "body": "Put the comment above so that black leaves everything on one line.", "diff_hunk": "@@ -453,11 +467,57 @@ def __post_init__(self):\n else:\n self.cpu_offload = CPUOffload(offload_params=False)\n \n- if self.min_num_params is None:\n- self.min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n+ @staticmethod\n+ def get_module_class_from_name(module, name):\n+ \"\"\"\n+ Gets a class from a module by its name.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to get the class from.\n+ name (`str`): The name of the class.\n+ \"\"\"\n+ modules_children = list(module.children())\n+ if module.__class__.__name__ == name:\n+ return module.__class__\n+ elif len(modules_children) == 0:\n+ return\n+ else:\n+ for child_module in modules_children:\n+ module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)\n+ if module_class is not None:\n+ return module_class\n+\n+ def set_auto_wrap_policy(self, model):\n+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy\n \n if self.auto_wrap_policy is None:\n- if self.min_num_params > 0:\n+ auto_wrap_policy = os.environ.get(\"FSDP_AUTO_WRAP_POLICY\", FSDP_AUTO_WRAP_POLICY[-1])\n+ if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:\n+ transformer_cls_to_wrap = os.environ.get(\"FSDP_TRANSFORMER_CLS_TO_WRAP\", \"\")\n+ transformer_cls_to_wrap = FullyShardedDataParallelPlugin.get_module_class_from_name(\n+ model, transformer_cls_to_wrap\n+ )\n self.auto_wrap_policy = functools.partial(\n- size_based_auto_wrap_policy, min_num_params=self.min_num_params\n+ transformer_auto_wrap_policy,\n+ transformer_layer_cls={\n+ transformer_cls_to_wrap, # < ---- Transformer layer class to wrap\n+ },", "from_author": false }, { "body": "Yes, to help users observe which layers have been auto-wrapped in FSDP modules.", "diff_hunk": "@@ -162,6 +162,7 @@ def collate_fn(examples):\n # New Code #\n # For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer\n model = 
accelerator.prepare(model)\n+ accelerator.print(model)", "from_author": true }, { "body": "Yes, the discussion is happening in this issue https://github.com/pytorch/pytorch/issues/79605#issuecomment-1184410231", "diff_hunk": "@@ -146,9 +146,7 @@ def __init__(\n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n self.distributed_type = DistributedType.FSDP\n if self.mixed_precision != \"no\":\n- raise ValueError(\n- \"Mixed precision is currently not supported for FSDP. Please set `mixed_precision` to `no`.\"\n- )\n+ fsdp_plugin.set_mixed_precision(self.mixed_precision)", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -453,11 +467,57 @@ def __post_init__(self):\n else:\n self.cpu_offload = CPUOffload(offload_params=False)\n \n- if self.min_num_params is None:\n- self.min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n+ @staticmethod\n+ def get_module_class_from_name(module, name):\n+ \"\"\"\n+ Gets a class from a module by its name.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to get the class from.\n+ name (`str`): The name of the class.\n+ \"\"\"\n+ modules_children = list(module.children())\n+ if module.__class__.__name__ == name:\n+ return module.__class__\n+ elif len(modules_children) == 0:\n+ return\n+ else:\n+ for child_module in modules_children:\n+ module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)\n+ if module_class is not None:\n+ return module_class\n+\n+ def set_auto_wrap_policy(self, model):\n+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy\n \n if self.auto_wrap_policy is None:\n- if self.min_num_params > 0:\n+ auto_wrap_policy = os.environ.get(\"FSDP_AUTO_WRAP_POLICY\", FSDP_AUTO_WRAP_POLICY[-1])\n+ if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:\n+ transformer_cls_to_wrap = os.environ.get(\"FSDP_TRANSFORMER_CLS_TO_WRAP\", \"\")\n+ transformer_cls_to_wrap = FullyShardedDataParallelPlugin.get_module_class_from_name(\n+ model, transformer_cls_to_wrap\n+ )\n self.auto_wrap_policy = functools.partial(\n- size_based_auto_wrap_policy, min_num_params=self.min_num_params\n+ transformer_auto_wrap_policy,\n+ transformer_layer_cls={\n+ transformer_cls_to_wrap, # < ---- Transformer layer class to wrap\n+ },", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/522", "pr_id": 996480632 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 4014e3972..85b1f3237 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -65,7 +65,9 @@ def init_empty_weights(include_buffers: bool = False):\n def register_empty_parameter(module, name, param):\n old_register_parameter(module, name, param)\n if param is not None:\n- module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+ param_cls = type(module._parameters[name])\n+ kwargs = module._parameters[name].__dict__\n+ module._parameters[name] = param_cls(module._parameters[name].to(torch.device(\"meta\")), **kwargs)\n \n def register_empty_buffer(module, name, buffer):\n old_register_buffer(module, name, buffer)\n", "code_comments": [ { "body": "Leaves those poor lines alone :-)", "diff_hunk": "@@ -39,9 +39,7 @@ def init_empty_weights(include_buffers: bool = False):\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`):\n 
Whether or not to also put all buffers on the meta device while initializing.\n-\n Example:\n-", "from_author": false }, { "body": "🀣 yeah don't know what happened there", "diff_hunk": "@@ -39,9 +39,7 @@ def init_empty_weights(include_buffers: bool = False):\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`):\n Whether or not to also put all buffers on the meta device while initializing.\n-\n Example:\n-", "from_author": true }, { "body": "Fixed in aafea4a1e18ac6562bc678f3e607e0d2c5f89b38 ;) ", "diff_hunk": "@@ -39,9 +39,7 @@ def init_empty_weights(include_buffers: bool = False):\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`):\n Whether or not to also put all buffers on the meta device while initializing.\n-\n Example:\n-", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/519", "pr_id": 995673825 }, { "diff": "diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nindex d52cf5e1e..0e8f31407 100644\n--- a/.github/workflows/nightly.yml\n+++ b/.github/workflows/nightly.yml\n@@ -35,6 +35,7 @@ jobs:\n - name: Run examples on GPUs\n run: |\n source activate accelerate\n+ pip uninstall comet_ml -y\n make test_examples\n \n run_all_tests_multi_gpu:\n@@ -64,4 +65,5 @@ jobs:\n - name: Run examples on GPUs\n run: |\n source activate accelerate\n+ pip uninstall comet_ml -y\n make test_examples\n\\ No newline at end of file\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/on-merge.yml\nindex b95474758..59677b34e 100644\n--- a/.github/workflows/on-merge.yml\n+++ b/.github/workflows/on-merge.yml\n@@ -34,6 +34,7 @@ jobs:\n - name: Run examples on GPUs\n run: |\n source activate accelerate\n+ pip uninstall comet_ml -y\n make test_examples\n \n run_all_tests_multi_gpu:\n@@ -61,4 +62,5 @@ jobs:\n - name: Run examples on GPUs\n run: |\n source activate accelerate\n+ pip uninstall comet_ml -y\n make test_examples\n\\ No newline at end of file\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex c585e742a..c0871b28c 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -38,6 +38,7 @@ jobs:\n run: |\n pip install --upgrade pip\n pip install -e .[test,test_trackers]\n+ if [ ${{ matrix.test-kind }} = test_rest ]; then pip uninstall comet_ml -y; fi\n \n - name: Run Tests\n run: |\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex e380d2d85..95af2a6c9 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -125,7 +125,6 @@ def collate_fn(examples):\n \n def training_function(config, args):\n # New Code #\n- test_labels = None\n test_predictions = []\n # Download the dataset\n datasets = load_dataset(\"glue\", \"mrpc\")\n@@ -152,17 +151,15 @@ def training_function(config, args):\n # New Code #\n # Create our folds:\n folds = kfold.split(np.zeros(datasets[\"train\"].num_rows), datasets[\"train\"][\"label\"])\n-\n+ test_references = []\n # Iterate over them\n- for train_idxs, valid_idxs in folds:\n+ for i, (train_idxs, valid_idxs) in enumerate(folds):\n train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(\n accelerator,\n datasets,\n train_idxs,\n valid_idxs,\n )\n- if test_labels is None:\n- test_labels = datasets[\"validation\"][\"label\"]\n # Instantiate the model (we build the model here so that the seed 
also control new weights initialization)\n model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n \n@@ -231,19 +228,18 @@ def training_function(config, args):\n predictions = outputs.logits\n predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n fold_predictions.append(predictions.cpu())\n- metric.add_batch(\n- predictions=predictions.argmax(dim=-1),\n- references=references,\n- )\n- test_metric = metric.compute()\n+ if i == 0:\n+ # We need all of the test predictions\n+ test_references.append(references.cpu())\n # Use accelerator.print to print only on the main process.\n test_predictions.append(torch.cat(fold_predictions, dim=0))\n # We now need to release all our memory and get rid of the current model, optimizer, etc\n accelerator.free_memory()\n # New Code #\n # Finally we check the accuracy of our folded results:\n- preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(config[\"n_splits\"])).argmax(dim=-1)\n- test_metric = metric.compute(predictions=preds, references=test_labels)\n+ test_references = torch.cat(test_references, dim=0)\n+ preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)\n+ test_metric = metric.compute(predictions=preds, references=test_references)\n accelerator.print(\"Average test metrics from all folds:\", test_metric)\n \n \ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex ef2b1388b..3278a913a 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -165,8 +165,6 @@ def training_function(config, args):\n if args.with_tracking:\n if accelerator.is_main_process:\n run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n accelerator.init_trackers(run, config)\n \n # Now we train the model\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 017f66d06..0b3145070 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -15,6 +15,7 @@\n import asyncio\n import os\n import shutil\n+import subprocess\n import sys\n import tempfile\n import unittest\n@@ -129,6 +130,22 @@ def require_comet_ml(test_case):\n return unittest.skipUnless(is_comet_ml_available(), \"test requires comet_ml\")(test_case)\n \n \n+_atleast_one_tracker_available = (\n+ any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()\n+)\n+\n+\n+def require_trackers(test_case):\n+ \"\"\"\n+ Decorator marking that a test requires at least one tracking library installed. These tests are skipped when none\n+ are installed\n+ \"\"\"\n+ return unittest.skipUnless(\n+ _atleast_one_tracker_available,\n+ \"test requires at least one tracker to be available and for `comet_ml` to not be installed\",\n+ )(test_case)\n+\n+\n class TempDirTestCase(unittest.TestCase):\n \"\"\"\n A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its\n@@ -279,3 +296,24 @@ def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False\n )\n \n return result\n+\n+\n+class SubprocessCallException(Exception):\n+ pass\n+\n+\n+def run_command(command: List[str], return_stdout=False):\n+ \"\"\"\n+ Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. 
Will also properly capture\n+ if an error occured while running `command`\n+ \"\"\"\n+ try:\n+ output = subprocess.check_output(command, stderr=subprocess.STDOUT)\n+ if return_stdout:\n+ if hasattr(output, \"decode\"):\n+ output = output.decode(\"utf-8\")\n+ return output\n+ except subprocess.CalledProcessError as e:\n+ raise SubprocessCallException(\n+ f\"Command `{' '.join(command)}` failed with the following error:\\n\\n{e.output.decode()}\"\n+ ) from e\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 4b298d253..9e1854cd8 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -16,7 +16,6 @@\n import os\n import re\n import shutil\n-import subprocess\n import tempfile\n import unittest\n from unittest import mock\n@@ -24,7 +23,7 @@\n import torch\n \n from accelerate.test_utils.examples import compare_against_test\n-from accelerate.test_utils.testing import TempDirTestCase, slow\n+from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow\n from accelerate.utils import write_basic_config\n \n \n@@ -145,7 +144,7 @@ def test_checkpointing_by_epoch(self):\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n \"\"\".split()\n- _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n+ run_command(self._launch_args + testargs)\n self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_1\")))\n \n def test_checkpointing_by_steps(self):\n@@ -154,7 +153,7 @@ def test_checkpointing_by_steps(self):\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n \"\"\".split()\n- _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE, env=os.environ)\n+ _ = run_command(self._launch_args + testargs)\n self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_5\")))\n \n def test_load_states_by_epoch(self):\n@@ -162,9 +161,7 @@ def test_load_states_by_epoch(self):\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, \"epoch_1\")}\n \"\"\".split()\n- output = subprocess.run(\n- self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n- ).stdout\n+ output = run_command(self._launch_args + testargs, return_stdout=True)\n self.assertNotIn(\"epoch 0:\", output)\n self.assertNotIn(\"epoch 1:\", output)\n self.assertIn(\"epoch 2:\", output)\n@@ -174,9 +171,7 @@ def test_load_states_by_steps(self):\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_5\")}\n \"\"\".split()\n- output = subprocess.run(\n- self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n- ).stdout\n+ output = run_command(self._launch_args + testargs, return_stdout=True)\n if torch.cuda.is_available():\n num_processes = torch.cuda.device_count()\n else:\n@@ -184,11 +179,10 @@ def test_load_states_by_steps(self):\n if num_processes > 1:\n self.assertNotIn(\"epoch 0:\", output)\n self.assertNotIn(\"epoch 1:\", output)\n- self.assertIn(\"epoch 2:\", output)\n else:\n self.assertNotIn(\"epoch 0:\", output)\n self.assertIn(\"epoch 1:\", output)\n- self.assertIn(\"epoch 2:\", output)\n+ self.assertIn(\"epoch 2:\", output)\n \n @slow\n def test_cross_validation(self):\n@@ -197,16 +191,16 @@ def test_cross_validation(self):\n --num_folds 2\n \"\"\".split()\n with mock.patch.dict(os.environ, {\"TESTING_MOCKED_DATALOADERS\": \"0\"}):\n- output = subprocess.run(\n- self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE\n- ).stdout\n+ output = run_command(self._launch_args + testargs, return_stdout=True)\n results = ast.literal_eval(re.findall(\"({.+})\", output)[-1])\n self.assertGreaterEqual(results[\"accuracy\"], 0.75)\n \n def test_multi_process_metrics(self):\n testargs = [\"examples/by_feature/multi_process_metrics.py\"]\n- _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n+ run_command(self._launch_args + testargs)\n \n+ @require_trackers\n+ @mock.patch.dict(os.environ, {\"WANDB_MODE\": \"offline\"})\n def test_tracking(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n testargs = f\"\"\"\n@@ -214,9 +208,9 @@ def test_tracking(self):\n --with_tracking\n --logging_dir {tmpdir}\n \"\"\".split()\n- _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n+ run_command(self._launch_args + testargs)\n self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\n \n def test_gradient_accumulation(self):\n testargs = [\"examples/by_feature/gradient_accumulation.py\"]\n- _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n+ run_command(self._launch_args + testargs)\n", "code_comments": [ { "body": "This will stop working on distributed setup. The predictions will need to be gathered (maybe not the labels).", "diff_hunk": "@@ -229,20 +229,14 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))", "from_author": false }, { "body": "Should be fixed now", "diff_hunk": "@@ -229,20 +229,14 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "All slow ci is now passing here: https://github.com/huggingface/accelerate/runs/7358153325", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/517", "pr_id": 995334779 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 907122036..0051d899a 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -15,6 +15,7 @@\n # limitations under the License.\n \n from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_transformers_available\n+from ...utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\n from .config_args import ClusterConfig\n from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n \n@@ -144,6 +145,51 @@ def get_cluster_input():\n \"Please run `pip3 install transformers`.\"\n )\n \n+ if num_machines > 1:\n+ launcher_query = \"Which Type of launcher do you want to use \"\n+ for i, launcher in enumerate(DEEPSPEED_MULTINODE_LAUNCHERS):\n+ launcher_query += f\"[{i}] {launcher}, \"\n+ launcher_query = launcher_query[:-2] + \")? 
[0]: \"\n+ deepspeed_config[\"deepspeed_multinode_launcher\"] = _ask_field(\n+ launcher_query,\n+ lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],\n+ default=DEEPSPEED_MULTINODE_LAUNCHERS[0],\n+ )\n+\n+ if deepspeed_config[\"deepspeed_multinode_launcher\"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n+ deepspeed_config[\"deepspeed_hostfile\"] = _ask_field(\n+ \"DeepSpeed configures multi-node compute resources with hostfile. \"\n+ \"Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; \"\n+ \"for more information please refer official [documentation]\"\n+ \"(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). \"\n+ \"Please specify the location of hostfile: \",\n+ lambda x: str(x),\n+ )\n+\n+ is_exclusion_filter = _ask_field(\n+ \"Do you want to specify exclusion filter string? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if is_exclusion_filter:\n+ deepspeed_config[\"deepspeed_exclusion_filter\"] = _ask_field(\n+ \"DeepSpeed exclusion filter string: \",\n+ lambda x: str(x),\n+ )\n+\n+ is_inclusion_filter = _ask_field(\n+ \"Do you want to specify inclusion filter string? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if is_inclusion_filter:\n+ deepspeed_config[\"deepspeed_inclusion_filter\"] = _ask_field(\n+ \"DeepSpeed inclusion filter string: \",\n+ lambda x: str(x),\n+ )\n+\n fsdp_config = {}\n if distributed_type in [DistributedType.MULTI_GPU]:\n use_fsdp = _ask_field(\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex ac91ad1e9..cffe9990b 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -35,6 +35,7 @@\n is_deepspeed_available,\n is_sagemaker_available,\n )\n+from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS\n from accelerate.utils.dataclasses import SageMakerDistributedType\n \n \n@@ -109,6 +110,30 @@ def launch_command_parser(subparsers=None):\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
\"\n \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n )\n+ parser.add_argument(\n+ \"--deepspeed_hostfile\",\n+ default=None,\n+ type=str,\n+ help=\"DeepSpeed hostfile for configuring multi-node compute resources.\",\n+ )\n+ parser.add_argument(\n+ \"--deepspeed_exclusion_filter\",\n+ default=None,\n+ type=str,\n+ help=\"DeepSpeed exclusion filter string when using mutli-node setup.\",\n+ )\n+ parser.add_argument(\n+ \"--deepspeed_inclusion_filter\",\n+ default=None,\n+ type=str,\n+ help=\"DeepSpeed inclusion filter string when using mutli-node setup.\",\n+ )\n+ parser.add_argument(\n+ \"--deepspeed_multinode_launcher\",\n+ default=None,\n+ type=str,\n+ help=\"DeepSpeed multi-node launcher to use.\",\n+ )\n parser.add_argument(\n \"--use_fsdp\",\n default=False,\n@@ -312,20 +337,42 @@ def deepspeed_launcher(args):\n raise ImportError(\"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\")\n cmd = [\"deepspeed\", \"--no_local_rank\"]\n if args.num_machines > 1:\n- cmd.extend(\n- [\n- \"--num_gpus\",\n- str(args.num_processes // args.num_machines),\n- \"--num_nodes\",\n- str(args.num_machines),\n- \"--node_rank\",\n- str(args.machine_rank),\n- \"--master_addr\",\n- args.main_process_ip,\n- \"--master_port\",\n- str(args.main_process_port),\n- ]\n- )\n+ if args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n+ cmd = get_launch_prefix()\n+ cmd.extend(\n+ [\n+ \"--nproc_per_node\",\n+ str(args.num_processes // args.num_machines),\n+ \"--nnodes\",\n+ str(args.num_machines),\n+ \"--node_rank\",\n+ str(args.machine_rank),\n+ \"--master_addr\",\n+ args.main_process_ip,\n+ \"--master_port\",\n+ str(args.main_process_port),\n+ ]\n+ )\n+ else:\n+ cmd.extend(\n+ [\"--hostfile\", str(args.deepspeed_hostfile), \"--launcher\", str(args.deepspeed_multinode_launcher)]\n+ )\n+ if args.deepspeed_exclusion_filter is not None:\n+ cmd.extend(\n+ [\n+ \"--exclude\",\n+ str(args.deepspeed_exclusion_filter),\n+ ]\n+ )\n+ elif args.deepspeed_inclusion_filter is not None:\n+ cmd.extend(\n+ [\n+ \"--include\",\n+ str(args.deepspeed_inclusion_filter),\n+ ]\n+ )\n+ else:\n+ cmd.extend([\"--num_gpus\", str(args.num_processes // args.num_machines)])\n else:\n cmd.extend([\"--num_gpus\", str(args.num_processes)])\n \n@@ -350,6 +397,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n+ current_env[\"PYTHONPATH\"] = sys.executable\n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n current_env[\"USE_DEEPSPEED\"] = \"true\"\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n@@ -361,6 +409,13 @@ def deepspeed_launcher(args):\n current_env[\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n current_env[\"DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file).lower()\n \n+ if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n+ with open(\".deepspeed_env\", \"a\") as f:\n+ for key, value in current_env.items():\n+ if \";\" in value or \" \" in value:\n+ continue\n+ f.write(f\"{key}={value}\\n\")\n+\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n if process.returncode != 0:\ndiff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\nindex 173cfa790..5e31a96c9 100644\n--- a/src/accelerate/utils/constants.py\n+++ b/src/accelerate/utils/constants.py\n@@ -24,5 +24,6 @@\n SAGEMAKER_PYTHON_VERSION = \"py38\"\n SAGEMAKER_TRANSFORMERS_VERSION = \"4.17.0\"\n SAGEMAKER_PARALLEL_EC2_INSTANCES = [\"ml.p3.16xlarge\", \"ml.p3dn.24xlarge\", \"ml.p4dn.24xlarge\"]\n+DEEPSPEED_MULTINODE_LAUNCHERS = [\"pdsh\", \"standard\", \"openmpi\", \"mvapich\"]\n \n STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\n", "code_comments": [ { "body": "Could we add some documentation on the format of the hostfile? Then we can link it from here.", "diff_hunk": "@@ -144,6 +145,46 @@ def get_cluster_input():\n \"Please run `pip3 install transformers`.\"\n )\n \n+ if num_machines > 1:\n+ deepspeed_config[\"deepspeed_hostfile\"] = _ask_field(\n+ \"DeepSpeed configures multi-node compute resources with hostfile, please specify the location of hostfile: \",", "from_author": false }, { "body": "Maybe start with this question and ask if users want to use the traditional PyTorch DDP launcher here, in which case the other questions are moot.", "diff_hunk": "@@ -144,6 +145,46 @@ def get_cluster_input():\n \"Please run `pip3 install transformers`.\"\n )\n \n+ if num_machines > 1:\n+ deepspeed_config[\"deepspeed_hostfile\"] = _ask_field(\n+ \"DeepSpeed configures multi-node compute resources with hostfile, please specify the location of hostfile: \",\n+ lambda x: str(x),\n+ )\n+\n+ is_exclusion_filter = _ask_field(\n+ \"Do you want to specify exclusion filter string? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if is_exclusion_filter:\n+ deepspeed_config[\"deepspeed_exclusion_filter\"] = _ask_field(\n+ \"DeepSpeed exclusion filter string: \",\n+ lambda x: str(x),\n+ )\n+\n+ is_inclusion_filter = _ask_field(\n+ \"Do you want to specify inclusion filter string? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if is_inclusion_filter:\n+ deepspeed_config[\"deepspeed_inclusion_filter\"] = _ask_field(\n+ \"DeepSpeed inclusion filter string: \",\n+ lambda x: str(x),\n+ )\n+\n+ launcher_query = \"Which Type of launcher do you want to use \"\n+ for i, launcher in enumerate(DEEPSPEED_MULTINODE_LAUNCHERS):\n+ launcher_query += f\"[{i}] {launcher}, \"\n+ launcher_query = launcher_query[:-2] + \")? 
[0]: \"\n+ deepspeed_config[\"deepspeed_multinode_launcher\"] = _ask_field(", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -144,6 +145,46 @@ def get_cluster_input():\n \"Please run `pip3 install transformers`.\"\n )\n \n+ if num_machines > 1:\n+ deepspeed_config[\"deepspeed_hostfile\"] = _ask_field(\n+ \"DeepSpeed configures multi-node compute resources with hostfile, please specify the location of hostfile: \",", "from_author": true }, { "body": "this is a problem, @pacman100 - how can a user then use `PYTHONPATH` if it's being hijacked by the launcher?\r\n\r\nIf you must manipulate `PYTHONPATH` you need to extend it and not override it.", "diff_hunk": "@@ -350,6 +397,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n+ current_env[\"PYTHONPATH\"] = sys.executable", "from_author": false }, { "body": "but also why would you set something that should be `path1:...:pathn` to find python libraries to a path to an executable which is not a directory.", "diff_hunk": "@@ -350,6 +397,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n+ current_env[\"PYTHONPATH\"] = sys.executable", "from_author": false }, { "body": "Proposed fix https://github.com/huggingface/accelerate/pull/741\r\n", "diff_hunk": "@@ -350,6 +397,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n+ current_env[\"PYTHONPATH\"] = sys.executable", "from_author": false }, { "body": "I don't remember but it was needed to know the python env on other nodes to run the code. If I don't do that, deepspeed code will still hijack it: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/launcher/runner.py#L480-L484.\r\n\r\nBelow is the run with this line commented and you can notice that PYTHONPATH is still overridden by the above deepspeed code:\r\n\r\n<img width=\"1502\" alt=\"Screenshot 2022-10-05 at 12 35 26 PM\" src=\"https://user-images.githubusercontent.com/13534540/194000694-8d5aa407-7ff3-42a0-9f84-908e8ae67713.png\">\r\n ", "diff_hunk": "@@ -350,6 +397,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n+ current_env[\"PYTHONPATH\"] = sys.executable", "from_author": true }, { "body": "The code you linked to:\r\n```\r\n curr_path = os.path.abspath('.')\r\n if 'PYTHONPATH' in env:\r\n env['PYTHONPATH'] = curr_path + \":\" + env['PYTHONPATH']\r\n else:\r\n env['PYTHONPATH'] = curr_path\r\n```\r\n\r\n1. it doesn't override the setting, it appends to it:\r\n2. it doesn't add `sys.executable` but the current working directory. I think sort of pushing ``pwd`` of the launched-from-dir.", "diff_hunk": "@@ -350,6 +397,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n+ current_env[\"PYTHONPATH\"] = sys.executable", "from_author": false }, { "body": "Hello Stas, yeah, you are correct, my bad. 
After spending quite some time diving deep and setting up the multi-node setup to test things out, I have found the reason.\r\n\r\nIn Accelerate, all the DeepSpeed config from `accelerate config` command is passed as env variables, as shown towards the end of the above screenshot. DeepSpeed uses `.deepspeed_env` file for all these \r\nadditional env variables instead of using the passed env variables as mentioned here: https://www.deepspeed.ai/getting-started/#multi-node-environment-variables\r\n\r\nSo, I was adding all the env variables to `.deepspeed_env` file as seen here: https://github.com/huggingface/accelerate/blob/main/src/accelerate/commands/launch.py#L590-L594\r\n. The DeepSpeed snippet from the above message would extend/write to PYTHONPATH. However, just after that things will be read from `.deepspeed_env` and env variables will be reset or newly added. Here, the `pwd` extension would be reset and the original PYTHONPATH if sent from accelerate will be used instead of extended one. I overlooked extension usecase and confused `pwd` for python env path and just added that directly to the launch env variables leading to unintended behaviour. ", "diff_hunk": "@@ -350,6 +397,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n+ current_env[\"PYTHONPATH\"] = sys.executable", "from_author": true }, { "body": "Thank you for the extended commentary that gives more context to the problem, @pacman100 \r\n\r\nAnd it looks like your suggestion in the PR resolving this issue. which has been merged now, so resolving this conversation. \r\n\r\nThank you!", "diff_hunk": "@@ -350,6 +397,7 @@ def deepspeed_launcher(args):\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n+ current_env[\"PYTHONPATH\"] = sys.executable", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/514", "pr_id": 995100364 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 354089c38..e8ff21884 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -855,17 +855,23 @@ def gather(self, tensor):\n \"\"\"\n return gather(tensor)\n \n- def reduce(self, tensor: torch.Tensor, reduction=\"sum\"):\n+ def reduce(self, tensor, reduction=\"sum\"):\n \"\"\"\n Reduce the values in *tensor* across all processes based on *reduction*.\n \n+ Note:\n+ All processes get the reduced value.\n+\n Args:\n- tensor (`torch.Tensor`):\n+ tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):\n The tensors to reduce across all processes.\n reduction (`str`, *optional*, defaults to \"sum\"):\n A reduction type, can be one of 'sum', 'mean', or 'none'. 
If 'none', will not perform any operation.\n+\n+ Returns:\n+ `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The reduced tensor(s).\n \"\"\"\n- reduce(tensor, reduction)\n+ return reduce(tensor, reduction)\n \n def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):\n \"\"\"\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex 46753513e..7505c04ce 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -430,7 +430,7 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\n xm.all_reduce(\"sum\", cloned_tensor)\n return cloned_tensor\n elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n- torch.distributed.reduce(cloned_tensor, ReduceOp.SUM)\n+ torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)\n return cloned_tensor\n else:\n if reduction == \"sum\":\n", "code_comments": [], "context": [ { "body": "Fix a bug when using reduce at a tensor.\r\n\r\nBefore:\r\n```\r\na = torch.tensor(accelerator.process_index, device=accelerator.device)\r\n\r\naccelerator.reduce(a, reduction='sum') # don't change a\r\n# or\r\na = accelerator.reduce(a, reduction='sum') # a is None\r\n```\r\n\r\nAfter:\r\n```\r\na = torch.tensor(accelerator.process_index, device=accelerator.device)\r\na = accelerator.reduce(a, reduction='sum') # a is correctly reduced\r\n```\r\n", "from_author": true }, { "body": "Thanks for the fix! πŸ˜„", "from_author": false }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "When I update the doc for this pr, I find that the `accelerate.utils.reduce` seems have inconsistent behavior on different type of devices.\r\n\r\nIn detail, it will reduce value on all processes, i.e. `all_reduce` when device is TPU. But only reduce to the main process, i.e. `reduce`, when device is GPU.\r\n\r\nCould I change the behavior to `all_reduce` when device is GPU ?\r\n\r\n\r\nFor more infomation, see the doc stuff [here](https://github.com/wwhio/accelerate/blob/96a17aae1164da5511a50d7138bd6f8ae5303267/src/accelerate/accelerator.py#L862-L864) and the `accelerate.utils.reduce` code [here](https://github.com/wwhio/accelerate/blob/96a17aae1164da5511a50d7138bd6f8ae5303267/src/accelerate/utils/operations.py#L411-L441).\r\n", "from_author": true }, { "body": "Yes, I think it should also be `all_reduce` on GPU to avoid bugs when not all processes have the same tensor later in the code.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/513", "pr_id": 994934885 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 04571e50e..c2fd77282 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -9,6 +9,8 @@\n - sections:\n - local: big_modeling\n title: Handling big models\n+ - local: gradient_accumulation\n+ title: Gradient accumulation\n - local: sagemaker\n title: Amazon SageMaker\n title: Guides\ndiff --git a/docs/source/gradient_accumulation.mdx b/docs/source/gradient_accumulation.mdx\nnew file mode 100644\nindex 000000000..f6ab857d8\n--- /dev/null\n+++ b/docs/source/gradient_accumulation.mdx\n@@ -0,0 +1,126 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Performing gradient accumulation with πŸ€— Accelerate\n+\n+Gradient accumulation is a technique where you can train on bigger batch sizes than \n+your machine would normally be able to fit into memory. This is done by accumulating gradients over\n+several batches, and only stepping the optimizer after a certain number of batches have been performed.\n+\n+While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient\n+method for doing so and you may experience considerable slowdowns!\n+\n+In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in πŸ€— Accelerate,\n+which can total to adding just one new line of code!\n+\n+This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+gradient_accumulation_steps = 2\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ loss.backward()\n+ if (index + 1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Converting it to πŸ€— Accelerate\n+\n+First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n+\n+```diff\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+\n++ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n++ )\n+\n+ for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n++ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+<Tip warning={true}>\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization.\n+</Tip>\n+\n+## Letting πŸ€— Accelerate handle gradient accumulation\n+\n+All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. 
To do so you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number \n+of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`Accelerator.backward`]:\n+\n+```diff\n+ from accelerate import Accelerator\n+- accelerator = Accelerator()\n++ accelerator = Accelerator(gradient_accumulation_steps=2)\n+```\n+\n+From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n+You just wrap it around the entire training part of your code: \n+\n+```diff\n+- for index, batch in enumerate(training_dataloader):\n++ for batch in training_dataloader:\n++ with accelerator.accumulate(model):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+```\n+\n+and you can remove all the special checks for the step number and the loss adjustment:\n+\n+```diff\n+- loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+- if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss. \n+\n+## The finished code\n+\n+Below is the finished implementation for performing gradient accumulation with πŸ€— Accelerate\n+\n+```python\n+for batch in training_dataloader:\n+ with accelerator.accumulate(model):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n", "code_comments": [ { "body": "Make sure to add a consistent indent level of 2 for the base, so that you can have `+ ` or `- ` without changing indentation on any line.", "diff_hunk": "@@ -0,0 +1,126 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Performing gradient accumulation with πŸ€— Accelerate\n+\n+Gradient accumulation is a technique where you can train on bigger batch sizes than \n+your machine would normally be able to fit into memory. 
This is done by accumulating gradients over\n+several batches, and only stepping the optimizer after a certain number of batches have been performed.\n+\n+While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient\n+method for doing so and you may experience considerable slowdowns!\n+\n+In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in πŸ€— Accelerate,\n+which can total to adding just one new line of code!\n+\n+This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n+\n+```\n+device = 'cuda'\n+model.to(device)\n+\n+gradient_accumulation_steps = 2\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ loss.backward()\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Converting it to πŸ€— Accelerate\n+\n+First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+\n++model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n++)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n++ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -0,0 +1,126 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Performing gradient accumulation with πŸ€— Accelerate\n+\n+Gradient accumulation is a technique where you can train on bigger batch sizes than \n+your machine would normally be able to fit into memory. 
This is done by accumulating gradients over\n+several batches, and only stepping the optimizer after a certain number of batches have been performed.\n+\n+While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient\n+method for doing so and you may experience considerable slowdowns!\n+\n+In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in πŸ€— Accelerate,\n+which can total to adding just one new line of code!\n+\n+This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n+\n+```\n+device = 'cuda'\n+model.to(device)\n+\n+gradient_accumulation_steps = 2\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ loss.backward()\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Converting it to πŸ€— Accelerate\n+\n+First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+\n++model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n++)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n++ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+<Tip warning={true}>\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the Concepts tutorial!\n+</Tip>\n+\n+## Letting πŸ€— Accelerate handle gradient accumulation\n+\n+All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. To do so you should pass in a `gradient_accumulation_steps` parameter to `Accelerator`, dictating the number \n+of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`Accelerator.backward`]:\n+\n+```diff\n+from accelerate import Accelerator\n+-accelerator = Accelerator()\n++accelerator = Accelerator(gradient_accumulation_steps=2)\n+```\n+\n+From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n+You just wrap it around the entire training part of our code: \n+\n+```diff\n+-for index, batch in enumerate(training_dataloader):\n++for batch in training_dataloader:\n++ with accelerator.accumulate(model):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+```", "from_author": false }, { "body": "```suggestion\r\nAll that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. To do so you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number \r\n```", "diff_hunk": "@@ -0,0 +1,126 @@\n+<!--Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Performing gradient accumulation with πŸ€— Accelerate\n+\n+Gradient accumulation is a technique where you can train on bigger batch sizes than \n+your machine would normally be able to fit into memory. This is done by accumulating gradients over\n+several batches, and only stepping the optimizer after a certain number of batches have been performed.\n+\n+While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient\n+method for doing so and you may experience considerable slowdowns!\n+\n+In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in πŸ€— Accelerate,\n+which can total to adding just one new line of code!\n+\n+This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n+\n+```\n+device = 'cuda'\n+model.to(device)\n+\n+gradient_accumulation_steps = 2\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ loss.backward()\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Converting it to πŸ€— Accelerate\n+\n+First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+\n++model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n++)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n++ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+<Tip warning={true}>\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the Concepts tutorial!\n+</Tip>\n+\n+## Letting πŸ€— Accelerate handle gradient accumulation\n+\n+All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. To do so you should pass in a `gradient_accumulation_steps` parameter to `Accelerator`, dictating the number ", "from_author": false }, { "body": "Same there + the step lines change since they lose and indent level.", "diff_hunk": "@@ -0,0 +1,126 @@\n+<!--Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Performing gradient accumulation with πŸ€— Accelerate\n+\n+Gradient accumulation is a technique where you can train on bigger batch sizes than \n+your machine would normally be able to fit into memory. This is done by accumulating gradients over\n+several batches, and only stepping the optimizer after a certain number of batches have been performed.\n+\n+While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient\n+method for doing so and you may experience considerable slowdowns!\n+\n+In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in πŸ€— Accelerate,\n+which can total to adding just one new line of code!\n+\n+This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n+\n+```\n+device = 'cuda'\n+model.to(device)\n+\n+gradient_accumulation_steps = 2\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ loss.backward()\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Converting it to πŸ€— Accelerate\n+\n+First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+\n++model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n++)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n++ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+<Tip warning={true}>\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the Concepts tutorial!\n+</Tip>\n+\n+## Letting πŸ€— Accelerate handle gradient accumulation\n+\n+All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. 
To do so you should pass in a `gradient_accumulation_steps` parameter to `Accelerator`, dictating the number \n+of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`Accelerator.backward`]:\n+\n+```diff\n+from accelerate import Accelerator\n+-accelerator = Accelerator()\n++accelerator = Accelerator(gradient_accumulation_steps=2)\n+```\n+\n+From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n+You just wrap it around the entire training part of our code: \n+\n+```diff\n+-for index, batch in enumerate(training_dataloader):\n++for batch in training_dataloader:\n++ with accelerator.accumulate(model):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+```\n+\n+and you can remove all the special checks for the step number and the loss adjustment:\n+\n+```diff\n+-loss = loss / gradient_accumulation_steps\n+accelerator.backward(loss)\n+-if (index+1) % gradient_accumulation_steps == 0:\n+optimizer.step()\n+scheduler.step()\n+```", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/511", "pr_id": 994424994 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 7e99c76ef..354089c38 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -240,10 +240,6 @@ def __init__(\n raise NotImplementedError(\n \"Gradient accumulation on TPU is not supported. Pass in `gradient_accumulation_steps=1`\"\n )\n- if dispatch_batches:\n- raise NotImplementedError(\n- \"Gradient accumulation with dispatched dataloaders is not supported. 
Pass in `gradient_accumulation_steps=1` or `dispatch_batches=False`\"\n- )\n \n self.gradient_accumulation_steps = gradient_accumulation_steps\n self.device_placement = device_placement\n@@ -397,7 +393,7 @@ def _do_sync(self):\n self.gradient_state._set_sync_gradients(True)\n else:\n self.step += 1\n- self.gradient_state._set_sync_gradients((self.step % self.gradient_accumulation_steps) == 0)\n+ self.gradient_state._set_sync_gradients((self.step % self.gradient_accumulation_steps) == 0)\n \n @property\n def sync_gradients(self):\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex a2c7a4a9b..fddfe8955 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -392,74 +392,87 @@ def __init__(self, dataset, split_batches: bool = False, **kwargs):\n self.gradient_state = GradientState()\n self.state = AcceleratorState()\n \n- def __iter__(self):\n- state = AcceleratorState()\n- if state.process_index == 0:\n- # We only iterate through the DataLoader on process 0.\n- main_iterator = super().__iter__()\n- stop_iteration = False\n- first_batch = None\n- while not stop_iteration:\n- # On process 0, we gather the batch to dispatch.\n- if state.process_index == 0:\n- try:\n- if self.split_batches:\n- # One batch of the main iterator is dispatched and split.\n- batch = next(main_iterator)\n- else:\n- # num_processes batches of the main iterator are concatenated then dispatched and split.\n- # We add the batches one by one so we have the remainder available when drop_last=False.\n- batches = []\n- for _ in range(state.num_processes):\n- batches.append(next(main_iterator))\n- batch = concatenate(batches, dim=0)\n- # In both cases, we need to get the structure of the batch that we will broadcast on other\n- # processes to initialize the tensors with the right shape.\n- # data_structure, stop_iteration\n+ def _fetch_batches(self, iterator):\n+ batches, batch = None, None\n+ # On process 0, we gather the batch to dispatch.\n+ if self.state.process_index == 0:\n+ try:\n+ if self.split_batches:\n+ # One batch of the main iterator is dispatched and split.\n+ batch = next(iterator)\n+ else:\n+ # num_processes batches of the main iterator are concatenated then dispatched and split.\n+ # We add the batches one by one so we have the remainder available when drop_last=False.\n+ batches = []\n+ for _ in range(self.state.num_processes):\n+ batches.append(next(iterator))\n+ batch = concatenate(batches, dim=0)\n+ # In both cases, we need to get the structure of the batch that we will broadcast on other\n+ # processes to initialize the tensors with the right shape.\n+ # data_structure, stop_iteration\n+ batch_info = [get_data_structure(batch), False]\n+ except StopIteration:\n+ batch_info = [None, True]\n+ else:\n+ batch_info = [None, self._stop_iteration]\n+ # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.\n+ broadcast_object_list(batch_info)\n+ self._stop_iteration = batch_info[1]\n+ if self._stop_iteration:\n+ # If drop_last is False and split_batches is False, we may have a remainder to take care of.\n+ if not self.split_batches and not self.drop_last:\n+ if self.state.process_index == 0 and len(batches) > 0:\n+ batch = concatenate(batches, dim=0)\n batch_info = [get_data_structure(batch), False]\n- except StopIteration:\n+ else:\n batch_info = [None, True]\n+ broadcast_object_list(batch_info)\n+ if batch_info[1]:\n+ return batch, batch_info, True\n else:\n- batch_info = [None, stop_iteration]\n-\n- # This 
is inplace, so after this instruction, every process has the same `batch_info` as process 0.\n- broadcast_object_list(batch_info)\n- stop_iteration = batch_info[1]\n- if stop_iteration:\n- # If drop_last is False and split_batches is False, we may have a remainder to take care of.\n- if not self.split_batches and not self.drop_last:\n- if state.process_index == 0 and len(batches) > 0:\n- batch = concatenate(batches, dim=0)\n- batch_info = [get_data_structure(batch), False]\n- else:\n- batch_info = [None, True]\n- broadcast_object_list(batch_info)\n- if batch_info[1]:\n- continue\n- else:\n- continue\n+ return batch, batch_info, True\n+ return batch, batch_info, False\n \n- if state.process_index != 0:\n+ def __iter__(self):\n+ self.gradient_state._set_end_of_dataloader(False)\n+ main_iterator = None\n+ if self.state.process_index == 0:\n+ # We only iterate through the DataLoader on process 0.\n+ main_iterator = super().__iter__()\n+ self._stop_iteration = False\n+ first_batch = None\n+ batch, batch_info, skip = self._fetch_batches(main_iterator)\n+ while True:\n+ if skip:\n+ continue\n+ if self.state.process_index != 0:\n # Initialize tensors on other processes than process 0.\n batch = initialize_tensors(batch_info[0])\n- batch = send_to_device(batch, state.device)\n+ batch = send_to_device(batch, self.state.device)\n # Broadcast the batch before splitting it.\n batch = broadcast(batch, from_process=0)\n \n if not self.drop_last and first_batch is None:\n # We keep at least num processes elements of the first batch to be able to complete the last batch\n- first_batch = slice_tensors(batch, slice(0, state.num_processes))\n+ first_batch = slice_tensors(batch, slice(0, self.state.num_processes))\n \n observed_batch_size = find_batch_size(batch)\n- batch_size = observed_batch_size // state.num_processes\n+ batch_size = observed_batch_size // self.state.num_processes\n \n- if not self.drop_last and stop_iteration and observed_batch_size % state.num_processes != 0:\n+ if not self.drop_last and self._stop_iteration and observed_batch_size % self.state.num_processes != 0:\n # If the last batch is not complete, let's add the first batch to it.\n batch = concatenate([batch, first_batch], dim=0)\n batch_size += 1\n \n- data_slice = slice(state.process_index * batch_size, (state.process_index + 1) * batch_size)\n- yield slice_tensors(batch, data_slice)\n+ data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)\n+ next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator)\n+ if not self._stop_iteration:\n+ yield slice_tensors(batch, data_slice)\n+ batch, batch_info, skip = next_batch, next_batch_info, next_skip\n+ else:\n+ self.gradient_state._set_end_of_dataloader(True)\n+ yield slice_tensors(batch, data_slice)\n+ break\n \n def __len__(self):\n whole_length = super().__len__()\ndiff --git a/src/accelerate/test_utils/scripts/test_sync.py b/src/accelerate/test_utils/scripts/test_sync.py\nindex 0b0dc7abd..ae5a2c65b 100644\n--- a/src/accelerate/test_utils/scripts/test_sync.py\n+++ b/src/accelerate/test_utils/scripts/test_sync.py\n@@ -25,7 +25,7 @@\n from accelerate.utils import DistributedType, set_seed\n \n \n-def check_model_parameters(model_a, model_b, did_step):\n+def check_model_parameters(model_a, model_b, did_step, iteration):\n for param, grad_param in zip(model_a.parameters(), model_b.parameters()):\n if not param.requires_grad:\n continue\n@@ -33,12 +33,12 @@ def check_model_parameters(model_a, model_b, did_step):\n # Grads should 
not be in sync\n assert (\n torch.allclose(param.grad, grad_param.grad) is False\n- ), f\"Gradients in sync when they should not be:\\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})\"\n+ ), f\"Gradients in sync when they should not be at iteration {iteration}:\\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})\"\n else:\n # Grads should be in sync\n assert (\n torch.allclose(param.grad, grad_param.grad) is True\n- ), f\"Gradients not in sync when they should be:\\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})\"\n+ ), f\"Gradients not in sync when they should be at iteration {iteration}:\\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})\"\n \n \n def step_model(model, input, target, accelerator, do_backward=True):\n@@ -96,7 +96,7 @@ def test_noop_sync(accelerator):\n step_model(ddp_model, ddp_input, ddp_target, accelerator)\n \n # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync\n- check_model_parameters(model, ddp_model, True)\n+ check_model_parameters(model, ddp_model, True, iteration)\n for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):\n if not param.requires_grad:\n continue\n@@ -106,7 +106,7 @@ def test_noop_sync(accelerator):\n \n # Shuffle ddp_input on each iteration\n torch.manual_seed(1337 + iteration)\n- ddp_input = ddp_input[torch.randperm(16)]\n+ ddp_input = ddp_input[torch.randperm(len(ddp_input))]\n \n \n def test_distributed_sync(accelerator):\n@@ -146,11 +146,13 @@ def test_distributed_sync(accelerator):\n \n # Shuffle ddp_input on each iteration\n torch.manual_seed(1337 + iteration)\n- ddp_input = ddp_input[torch.randperm(16)]\n+ ddp_input = ddp_input[torch.randperm(len(ddp_input))]\n \n \n-def test_gradient_accumulation():\n- accelerator = Accelerator(gradient_accumulation_steps=2)\n+def test_gradient_accumulation(split_batches=False, dispatch_batches=False):\n+ accelerator = Accelerator(\n+ gradient_accumulation_steps=2, split_batches=split_batches, dispatch_batches=dispatch_batches\n+ )\n # Test that context manager behaves properly\n model, ddp_model, dataloader = get_training_setup(accelerator)\n for iteration, batch in enumerate(dataloader):\n@@ -181,11 +183,13 @@ def test_gradient_accumulation():\n \n # Shuffle ddp_input on each iteration\n torch.manual_seed(1337 + iteration)\n- ddp_input = ddp_input[torch.randperm(16)]\n+ ddp_input = ddp_input[torch.randperm(len(ddp_input))]\n \n \n-def test_gradient_accumulation_with_opt_and_scheduler():\n- accelerator = Accelerator(gradient_accumulation_steps=2)\n+def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):\n+ accelerator = Accelerator(\n+ gradient_accumulation_steps=2, split_batches=split_batches, dispatch_batches=dispatch_batches\n+ )\n # Test that context manager behaves properly\n model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)\n for iteration, batch in enumerate(dataloader):\n@@ -198,8 +202,11 @@ def test_gradient_accumulation_with_opt_and_scheduler():\n ddp_model.train()\n step_model(model, input, target, accelerator, False)\n opt.step()\n- for _ in range(accelerator.num_processes):\n+ if split_batches:\n sched.step()\n+ else:\n+ for _ in range(accelerator.num_processes):\n+ sched.step()\n opt.zero_grad()\n # Perform gradient accumulation under wrapper\n with accelerator.accumulate(ddp_model):\n@@ -209,10 +216,12 @@ def test_gradient_accumulation_with_opt_and_scheduler():\n ddp_opt.zero_grad()\n \n 
# Learning rates should be the same\n- assert opt.param_groups[0][\"lr\"] == ddp_opt.param_groups[0][\"lr\"]\n- did_step = (((iteration + 1) % 2) == 0) or (iteration == (len(dataloader) - 1))\n+ assert (\n+ opt.param_groups[0][\"lr\"] == ddp_opt.param_groups[0][\"lr\"]\n+ ), f'Learning rates found in each optimizer did not align\\nopt: {opt.param_groups[0][\"lr\"]}\\nDDP opt: {ddp_opt.param_groups[0][\"lr\"]}\\n'\n+ did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))\n if accelerator.num_processes > 1:\n- check_model_parameters(model, ddp_model, did_step)\n+ check_model_parameters(model, ddp_model, did_step, iteration)\n # Shuffle ddp_input on each iteration\n torch.manual_seed(1337 + iteration)\n \n@@ -229,12 +238,31 @@ def main():\n print(\"**Test Distributed `no_sync` context manager**\")\n test_distributed_sync(accelerator)\n if state.distributed_type == DistributedType.MULTI_GPU:\n- if state.local_process_index == 0:\n- print(\"**Test `accumulate` gradient accumulation**\")\n- test_gradient_accumulation()\n+ for split_batch in [True, False]:\n+ for dispatch_batches in [True, False]:\n+ if state.local_process_index == 0:\n+ print(\n+ \"**Test `accumulate` gradient accumulation, \",\n+ f\"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**\",\n+ )\n+ test_gradient_accumulation(split_batch)\n if state.local_process_index == 0:\n- print(\"**Test `accumulate` gradient accumulation with optimizer and scheduler**\")\n+ print(\n+ \"**Test `accumulate` gradient accumulation with optimizer and scheduler, \",\n+ \"`split_batches=False`, `dispatch_batches=False`**\",\n+ )\n test_gradient_accumulation_with_opt_and_scheduler()\n+ if state.distributed_type == DistributedType.MULTI_GPU:\n+ for split_batch in [True, False]:\n+ for dispatch_batches in [True, False]:\n+ if not split_batch and not dispatch_batches:\n+ continue\n+ if state.local_process_index == 0:\n+ print(\n+ \"**Test `accumulate` gradient accumulation with optimizer and scheduler, \",\n+ f\"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**\",\n+ )\n+ test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)\n \n \n def _mp_fn(index):\n", "code_comments": [ { "body": "Should be split in several lines to stay under the max char len of 119.", "diff_hunk": "@@ -229,12 +238,28 @@ def main():\n print(\"**Test Distributed `no_sync` context manager**\")\n test_distributed_sync(accelerator)\n if state.distributed_type == DistributedType.MULTI_GPU:\n- if state.local_process_index == 0:\n- print(\"**Test `accumulate` gradient accumulation**\")\n- test_gradient_accumulation()\n+ for split_batch in [True, False]:\n+ for dispatch_batches in [True, False]:\n+ if state.local_process_index == 0:\n+ print(\n+ f\"**Test `accumulate` gradient accumulation, `split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**\"\n+ )\n+ test_gradient_accumulation(split_batch)\n if state.local_process_index == 0:\n- print(\"**Test `accumulate` gradient accumulation with optimizer and scheduler**\")\n+ print(\n+ \"**Test `accumulate` gradient accumulation with optimizer and scheduler, `split_batches=False`, `dispatch_batches=False`**\"\n+ )\n test_gradient_accumulation_with_opt_and_scheduler()\n+ if state.distributed_type == DistributedType.MULTI_GPU:\n+ for split_batch in [True, False]:\n+ for dispatch_batches in [True, False]:\n+ if not split_batch and not dispatch_batches:\n+ continue\n+ if state.local_process_index == 0:\n+ print(\n+ f\"**Test `accumulate` 
gradient accumulation with optimizer and scheduler, `split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**\"\n+ )", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/510", "pr_id": 994398341 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex f58752e53..a2c7a4a9b 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -463,7 +463,9 @@ def __iter__(self):\n \n def __len__(self):\n whole_length = super().__len__()\n- if self.drop_last:\n+ if self.split_batches:\n+ return whole_length\n+ elif self.drop_last:\n return whole_length // self.state.num_processes\n else:\n return math.ceil(whole_length / self.state.num_processes)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/509", "pr_id": 994366202 }, { "diff": "diff --git a/Makefile b/Makefile\nindex 58cb89ab9..6dab6af11 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -1,6 +1,6 @@\n .PHONY: quality style test docs\n \n-check_dirs := tests src examples\n+check_dirs := tests src examples benchmarks\n \n # Check that source code meets quality standards\n \ndiff --git a/benchmarks/README.md b/benchmarks/README.md\nnew file mode 100644\nindex 000000000..243e9df58\n--- /dev/null\n+++ b/benchmarks/README.md\n@@ -0,0 +1,46 @@\n+# Big model inference benchmarks\n+\n+Running inference with Accelerate on big models.\n+\n+## Setup\n+\n+These benchmarks use the `transformers` library:\n+\n+```bash\n+pip install transformers\n+```\n+\n+To reproduce or test a new setup, run\n+\n+```py\n+python inference_acc.py model_name\n+```\n+\n+This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.\n+\n+To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`.\n+\n+If you get an error linked to disk offload, you need to add the option `--disk-offload`\n+\n+## Results\n+\n+On a setup with two Titan RTXs (24GB of RAM) and 32GB of RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).\n+\n+| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |\n+|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|\n+| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |\n+| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |\n+| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |\n+| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7 GB | 24.4GB | yes |\n+| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |\n+| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |\n+| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |\n+\n+Note on the results:\n+- using two GPUs instead of one does not slow down generation\n+- using CPU offload slows down a bit (see OPT-30b)\n+- using disk offload slows down a lot (need to implement prefetching)\n+\n+You will also note that Accelerate does not use anymore GPU and CPU RAM than necessary:\n+- peak GPU memory is 
exactly the size of the model put on a given GPU\n+- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.\n\\ No newline at end of file\ndiff --git a/benchmarks/big_model_inference.py b/benchmarks/big_model_inference.py\nnew file mode 100644\nindex 000000000..cb832d128\n--- /dev/null\n+++ b/benchmarks/big_model_inference.py\n@@ -0,0 +1,143 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import time\n+\n+import torch\n+\n+import transformers\n+from accelerate.utils import compute_module_sizes\n+from measures_util import end_measure, log_measures, start_measure\n+from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n+\n+\n+DEFAULT_MODELS = {\n+ \"gpt-j-6b\": {\"is_causal\": True, \"model\": \"sgugger/sharded-gpt-j-6B\", \"tokenizer\": \"EleutherAI/gpt-j-6B\"},\n+ \"gpt-neox\": {\"is_causal\": True, \"model\": \"EleutherAI/gpt-neox-20b\"},\n+ \"opt\": {\"is_causal\": True, \"model\": \"facebook/opt-30b\"},\n+ \"T0pp\": {\"is_causal\": False, \"model\": \"bigscience/T0pp\", \"model_revision\": \"sharded\"},\n+}\n+\n+PROMPTS = [\n+ \"Hello, my name is\",\n+ \"Are unicorns real? 
Unicorns are\",\n+ \"For the first time in several years,\",\n+ \"My name is Julien and I am\",\n+ \"The goal of life is\",\n+ \"Whenever I'm sad, I like to\",\n+]\n+\n+\n+def parse_args():\n+ parser = argparse.ArgumentParser(description=\"Run and time generations on a big model using Accelerate.\")\n+ parser.add_argument(\"model_name\", type=str, default=None, help=\"The name of the model to try.\")\n+ parser.add_argument(\n+ \"--tokenizer_name\", type=str, default=None, help=\"The name of the tokenizer (if different from the model.\"\n+ )\n+ parser.add_argument(\"--is_causal\", type=bool, default=None, help=\"Whether or not the model is causal.\")\n+ parser.add_argument(\n+ \"--model_revision\", type=str, default=None, help=\"The revision to use for the model checkpoint.\"\n+ )\n+ parser.add_argument(\"--torch_dtype\", type=str, default=None, help=\"The dtype for the model.\")\n+ parser.add_argument(\"--disk_offload\", action=\"store_true\")\n+\n+ args = parser.parse_args()\n+\n+ # Sanitize args\n+ if args.model_name in DEFAULT_MODELS:\n+ defaults = DEFAULT_MODELS[args.model_name]\n+ args.model_name = defaults[\"model\"]\n+ if args.tokenizer_name is None:\n+ args.tokenizer_name = defaults.get(\"tokenizer\", args.model_name)\n+ if args.is_causal is None:\n+ args.is_causal = defaults[\"is_causal\"]\n+ if args.model_revision is None:\n+ args.model_revision = defaults.get(\"model_revision\", \"main\")\n+\n+ if args.is_causal is None:\n+ raise ValueError(\"Could not infer the default for `--is_causal`, pass either True or False for it.\")\n+ if args.tokenizer_name is None:\n+ args.tokenizer_name = args.model_name\n+ if args.model_revision is None:\n+ args.model_revision = \"main\"\n+\n+ return args\n+\n+\n+def main():\n+ transformers.utils.logging.set_verbosity_error()\n+ args = parse_args()\n+\n+ if args.torch_dtype is None:\n+ config = AutoConfig.from_pretrained(args.model_name)\n+ torch_dtype = getattr(config, \"torch_dtype\", torch.float32)\n+ else:\n+ torch_dtype = getattr(torch, args.torch_dtype)\n+ model_cls = AutoModelForCausalLM if args.is_causal else AutoModelForSeq2SeqLM\n+ kwargs = {\n+ \"torch_dtype\": torch_dtype,\n+ \"revision\": args.model_revision,\n+ }\n+ if args.disk_offload:\n+ kwargs[\"offload_folder\"] = \"tmp_offload\"\n+ kwargs[\"offload_state_dict\"] = True\n+\n+ start_measures = start_measure()\n+ model = model_cls.from_pretrained(args.model_name, device_map=\"auto\", **kwargs)\n+ end_measures = end_measure(start_measures)\n+ log_measures(end_measures, \"Model loading\")\n+\n+ module_sizes = compute_module_sizes(model)\n+ device_size = {v: 0 for v in model.hf_device_map.values()}\n+ for module, device in model.hf_device_map.items():\n+ device_size[device] += module_sizes[module]\n+ message = \"\\n\".join([f\"- {device}: {size // 2**20}MiB\" for device, size in device_size.items()])\n+ print(f\"\\nTheoretical use:\\n{message}\")\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)\n+\n+ start_measures = start_measure()\n+ generation_times = []\n+ gen_tokens = []\n+ texts_outs = []\n+ for prompt in PROMPTS:\n+ inputs = tokenizer(prompt, return_tensors=\"pt\").to(0)\n+ tokens = inputs[\"input_ids\"][0].tolist()\n+ before_generate = time.time()\n+ outputs = model.generate(inputs[\"input_ids\"])\n+ after_generate = time.time()\n+ outputs = outputs[0].tolist()\n+ num_gen_tokens = len(outputs) if outputs[: len(tokens)] != tokens else len(outputs) - len(tokens)\n+ generation_time = after_generate - before_generate\n+\n+ text_out = tokenizer.decode(outputs, 
skip_special_tokens=True)\n+ texts_outs.append(text_out)\n+ generation_times.append(generation_time)\n+ gen_tokens.append(num_gen_tokens)\n+ print(f\"Prompt: {prompt}\\nGeneration {text_out}\\nIn {generation_time:.2f}s for {num_gen_tokens} tokens\\n\")\n+\n+ end_measures = end_measure(start_measures)\n+ log_measures(end_measures, \"Model generation\")\n+\n+ generation_times_per_token = [gen / tok for gen, tok in zip(generation_times, gen_tokens)]\n+ avg_gen = sum(generation_times_per_token) / len(generation_times)\n+ print(f\"Average time of generation per token: {avg_gen:.2f}s\")\n+ print(f\"First generation (avg time per token): {generation_times_per_token[0]:.2f}s\")\n+ avg_gen = sum(generation_times_per_token[1:]) / (len(generation_times_per_token) - 1)\n+ print(f\"Average time of generation per token (excluding the first): {avg_gen:.2f}s\")\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/benchmarks/measures_util.py b/benchmarks/measures_util.py\nnew file mode 100644\nindex 000000000..b6ac76b54\n--- /dev/null\n+++ b/benchmarks/measures_util.py\n@@ -0,0 +1,86 @@\n+import gc\n+import threading\n+import time\n+\n+import torch\n+\n+import psutil\n+\n+\n+class PeakCPUMemory:\n+ def __init__(self):\n+ self.process = psutil.Process()\n+ self.peak_monitoring = False\n+\n+ def peak_monitor(self):\n+ self.cpu_memory_peak = -1\n+\n+ while True:\n+ self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)\n+\n+ # can't sleep or will not catch the peak right (this comment is here on purpose)\n+ if not self.peak_monitoring:\n+ break\n+\n+ def start(self):\n+ self.peak_monitoring = True\n+ self.thread = threading.Thread(target=self.peak_monitor)\n+ self.thread.daemon = True\n+ self.thread.start()\n+\n+ def stop(self):\n+ self.peak_monitoring = False\n+ self.thread.join()\n+ return self.cpu_memory_peak\n+\n+\n+cpu_peak_tracker = PeakCPUMemory()\n+\n+\n+def start_measure():\n+ # Time\n+ measures = {\"time\": time.time()}\n+\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+\n+ # CPU mem\n+ measures[\"cpu\"] = psutil.Process().memory_info().rss\n+ cpu_peak_tracker.start()\n+\n+ # GPU mem\n+ for i in range(torch.cuda.device_count()):\n+ measures[str(i)] = torch.cuda.memory_allocated(i)\n+ torch.cuda.reset_peak_memory_stats()\n+\n+ return measures\n+\n+\n+def end_measure(start_measures):\n+ # Time\n+ measures = {\"time\": time.time() - start_measures[\"time\"]}\n+\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+\n+ # CPU mem\n+ measures[\"cpu\"] = (psutil.Process().memory_info().rss - start_measures[\"cpu\"]) / 2**20\n+ measures[\"cpu-peak\"] = (cpu_peak_tracker.stop() - start_measures[\"cpu\"]) / 2**20\n+\n+ # GPU mem\n+ for i in range(torch.cuda.device_count()):\n+ measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20\n+ measures[f\"{i}-peak\"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20\n+\n+ return measures\n+\n+\n+def log_measures(measures, description):\n+ print(f\"{description}:\")\n+ print(f\"- Time: {measures['time']:.2f}s\")\n+ for i in range(torch.cuda.device_count()):\n+ print(f\"- GPU {i} allocated: {measures[str(i)]:.2f}MiB\")\n+ peak = measures[f\"{i}-peak\"]\n+ print(f\"- GPU {i} peak: {peak:.2f}MiB\")\n+ print(f\"- CPU RAM allocated: {measures['cpu']:.2f}MiB\")\n+ print(f\"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB\")\n", "code_comments": [ { "body": "```suggestion\r\nThese benchmarks use the `transformers` library:\r\n```", "diff_hunk": "@@ -0,0 +1,46 @@\n+# Big model inference 
benchmarks\n+\n+Running inference with Accelerate on big models.\n+\n+## Setup\n+\n+These benchmarks use the `Transformers` library:", "from_author": false }, { "body": "Do they not also need `measures_util`?\r\n\r\nMaybe add a `pip install accelerate[bench]`?", "diff_hunk": "@@ -0,0 +1,46 @@\n+# Big model inference benchmarks\n+\n+Running inference with Accelerate on big models.\n+\n+## Setup\n+\n+These benchmarks use the `Transformers` library:\n+\n+```bash\n+pip install transformers\n+```", "from_author": false }, { "body": "Forgot one file :grimacing: ", "diff_hunk": "@@ -0,0 +1,46 @@\n+# Big model inference benchmarks\n+\n+Running inference with Accelerate on big models.\n+\n+## Setup\n+\n+These benchmarks use the `Transformers` library:\n+\n+```bash\n+pip install transformers\n+```", "from_author": true } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_506). All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/506", "pr_id": 993489844 }, { "diff": "diff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex fe3d05bf3..1ef68200b 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -156,6 +156,7 @@ def __post_init__(self):\n class SageMakerConfig(BaseConfig):\n ec2_instance_type: str\n iam_role_name: str\n+ image_uri: str\n profile: Optional[str] = None\n region: str = \"us-east-1\"\n num_machines: int = 1\n@@ -163,3 +164,5 @@ class SageMakerConfig(BaseConfig):\n pytorch_version: str = SAGEMAKER_PYTORCH_VERSION\n transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION\n py_version: str = SAGEMAKER_PYTHON_VERSION\n+ sagemaker_inputs_file: str = None\n+ sagemaker_metrics_file: str = None\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex c0b17bc88..786881edf 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -20,7 +20,7 @@\n from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType\n from ...utils.imports import is_boto3_available\n from .config_args import SageMakerConfig\n-from .config_utils import _ask_field, _convert_sagemaker_distributed_mode\n+from .config_utils import _ask_field, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool\n \n \n if is_boto3_available():\n@@ -120,6 +120,42 @@ def get_sagemaker_input():\n print(f'Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials')\n _create_iam_role_for_sagemaker(iam_role_name)\n \n+ is_custom_docker_image = _ask_field(\n+ \"Do you want to use custom Docker image? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ docker_image = None\n+ if is_custom_docker_image:\n+ docker_image = _ask_field(\"Enter your Docker image: \", lambda x: str(x).lower())\n+\n+ is_sagemaker_inputs_enabled = _ask_field(\n+ \"Do you want to provide SageMaker input channels with data locations? 
[yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ sagemaker_inputs_file = None\n+ if is_sagemaker_inputs_enabled:\n+ sagemaker_inputs_file = _ask_field(\n+ \"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): \",\n+ lambda x: str(x).lower(),\n+ )\n+\n+ is_sagemaker_metrics_enabled = _ask_field(\n+ \"Do you want to enable SageMaker metrics? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ sagemaker_metrics_file = None\n+ if is_sagemaker_metrics_enabled:\n+ sagemaker_metrics_file = _ask_field(\n+ \"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): \",\n+ lambda x: str(x).lower(),\n+ )\n+\n distributed_type = _ask_field(\n \"Which type of machine are you using? ([0] No distributed training, [1] data parallelism): \",\n _convert_sagemaker_distributed_mode,\n@@ -155,6 +191,7 @@ def get_sagemaker_input():\n )\n \n return SageMakerConfig(\n+ image_uri=docker_image,\n compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,\n distributed_type=distributed_type,\n use_cpu=False,\n@@ -164,4 +201,6 @@ def get_sagemaker_input():\n iam_role_name=iam_role_name,\n mixed_precision=mixed_precision,\n num_machines=num_machines,\n+ sagemaker_inputs_file=sagemaker_inputs_file,\n+ sagemaker_metrics_file=sagemaker_metrics_file,\n )\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 421bfe064..ac91ad1e9 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -493,13 +493,44 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \"SAGEMAKER_DISTRIBUTED_TYPE\": sagemaker_config.distributed_type.value,\n }\n # configure distribution set up\n- distribution = None # TODO: not yet implemented\n+ distribution = None\n if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL:\n distribution = {\"smdistributed\": {\"dataparallel\": {\"enabled\": True}}}\n \n+ # configure sagemaker inputs\n+ sagemaker_inputs = None\n+ if sagemaker_config.sagemaker_inputs_file is not None:\n+ print(f\"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file\")\n+ sagemaker_inputs = {}\n+ with open(sagemaker_config.sagemaker_inputs_file) as file:\n+ for i, line in enumerate(file):\n+ if i == 0:\n+ continue\n+ l = line.split(\"\\t\")\n+ sagemaker_inputs[l[0]] = l[1].strip()\n+ print(f\"Loaded SageMaker Inputs: {sagemaker_inputs}\")\n+\n+ # configure sagemaker metrics\n+ sagemaker_metrics = None\n+ if sagemaker_config.sagemaker_metrics_file is not None:\n+ print(f\"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file\")\n+ sagemaker_metrics = []\n+ with open(sagemaker_config.sagemaker_metrics_file) as file:\n+ for i, line in enumerate(file):\n+ if i == 0:\n+ continue\n+ l = line.split(\"\\t\")\n+ metric_dict = {\n+ \"Name\": l[0],\n+ \"Regex\": l[1].strip(),\n+ }\n+ sagemaker_metrics.append(metric_dict)\n+ print(f\"Loaded SageMaker Metrics: {sagemaker_metrics}\")\n+\n # configure session\n print(\"Creating Estimator\")\n huggingface_estimator = HuggingFace(\n+ image_uri=sagemaker_config.image_uri,\n entry_point=entry_point,\n source_dir=source_dir,\n role=sagemaker_config.iam_role_name,\n@@ -513,9 +544,10 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n distribution=distribution,\n hyperparameters=hyperparameters,\n environment=environment,\n+ 
metric_definitions=sagemaker_metrics,\n )\n \n- huggingface_estimator.fit()\n+ huggingface_estimator.fit(inputs=sagemaker_inputs)\n print(f\"You can find your model data at: {huggingface_estimator.model_data}\")\n \n \n", "code_comments": [ { "body": "I think we should default to the HF Image? ", "diff_hunk": "@@ -156,10 +156,13 @@ def __post_init__(self):\n class SageMakerConfig(BaseConfig):\n ec2_instance_type: str\n iam_role_name: str\n+ image_uri: str", "from_author": false }, { "body": "Does accelerate output some default metrics, e.g. `loss` then we could already define them here? Or is there a \"logging\" schema, that way we could define a whole bunch of metrics already. ", "diff_hunk": "@@ -156,10 +156,13 @@ def __post_init__(self):\n class SageMakerConfig(BaseConfig):\n ec2_instance_type: str\n iam_role_name: str\n+ image_uri: str\n profile: Optional[str] = None\n region: str = \"us-east-1\"\n num_machines: int = 1\n base_job_name: str = f\"accelerate-sagemaker-{num_machines}\"\n pytorch_version: str = SAGEMAKER_PYTORCH_VERSION\n transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION\n py_version: str = SAGEMAKER_PYTHON_VERSION\n+ sagemaker_inputs_file: str = None\n+ sagemaker_metrics_file: str = None", "from_author": false }, { "body": "`Accelerate` doesn't log any metrics by default, it enables running the same PyTorch code on any setup and as such user is in full control of what is being logged (flexibility).\r\n", "diff_hunk": "@@ -156,10 +156,13 @@ def __post_init__(self):\n class SageMakerConfig(BaseConfig):\n ec2_instance_type: str\n iam_role_name: str\n+ image_uri: str\n profile: Optional[str] = None\n region: str = \"us-east-1\"\n num_machines: int = 1\n base_job_name: str = f\"accelerate-sagemaker-{num_machines}\"\n pytorch_version: str = SAGEMAKER_PYTORCH_VERSION\n transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION\n py_version: str = SAGEMAKER_PYTHON_VERSION\n+ sagemaker_inputs_file: str = None\n+ sagemaker_metrics_file: str = None", "from_author": true }, { "body": "Hello, HF image is used by default if `image_uri` isn't specified and it is based on the `Python`, `PyTorch` and `Transformers` versions.", "diff_hunk": "@@ -156,10 +156,13 @@ def __post_init__(self):\n class SageMakerConfig(BaseConfig):\n ec2_instance_type: str\n iam_role_name: str\n+ image_uri: str", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Wow @pacman100 you responded to that request quickly. 
Thanks everyone, the Accelerate team is awesome.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/504", "pr_id": 992996980 }, { "diff": "diff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex d3b253bce..403587f10 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -137,7 +137,7 @@ def training_function(config, args):\n lr_scheduler = get_linear_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=100,\n- num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ num_training_steps=(len(train_dataloader) * num_epochs),\n )\n \n # Prepare everything\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex c5ed66cd4..4fad12c72 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -39,6 +39,9 @@ class AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n \n+ Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient\n+ accumulation.\n+\n Args:\n optimizer (`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\ndiff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\nindex cfd6d719f..75695091e 100644\n--- a/src/accelerate/scheduler.py\n+++ b/src/accelerate/scheduler.py\n@@ -16,7 +16,7 @@\n \n import warnings\n \n-from .state import AcceleratorState, GradientState\n+from .state import AcceleratorState\n \n \n warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"torch.optim.lr_scheduler\")\n@@ -25,10 +25,11 @@\n class AcceleratedScheduler:\n \"\"\"\n A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful\n- to avoid making a scheduler step too fast when:\n+ to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed\n+ precision training)\n \n- - gradients went overflow and there was no training step (in mixed precision training)\n- - step was skipped because of gradient accumulation\n+ When performing gradient accumulation scheduler lengths should not be changed accordingly, accelerate will always\n+ step the scheduler to account for it.\n \n Args:\n scheduler (`torch.optim.lr_scheduler._LRScheduler`):\n@@ -47,7 +48,6 @@ def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, spli\n self.scheduler = scheduler\n self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]\n self.split_batches = split_batches\n- self.gradient_state = GradientState()\n self.step_with_optimizer = step_with_optimizer\n \n def step(self, *args, **kwargs):\n", "code_comments": [ { "body": "```suggestion\r\n When performing gradient accumulation scheduler lengths should not be changed accordingly, accelerate will\r\n always step the scheduler to account for it.\r\n```", "diff_hunk": "@@ -25,10 +25,11 @@\n class AcceleratedScheduler:\n \"\"\"\n A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. 
Useful\n- to avoid making a scheduler step too fast when:\n+ to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed\n+ precision training)\n \n- - gradients went overflow and there was no training step (in mixed precision training)\n- - step was skipped because of gradient accumulation\n+ When performing gradient accumulation scheduler lengths should not be changed accordingly, accelerate will handle\n+ the right times to step.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/500", "pr_id": 991864237 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 806f0094d..a0278218b 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -1,6 +1,6 @@\n name: Run Tests\n \n-on: [pull_request]\n+# on: [pull_request]\n \n env:\n HF_HOME: ~/hf_cache\ndiff --git a/Makefile b/Makefile\nindex 9cae9d74d..100a1484a 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -58,4 +58,4 @@ test_prod:\n \t$(MAKE) test_core\n \n test_rest:\n-\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\"\n\\ No newline at end of file\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\"\ndiff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex c2fd77282..0da843ec2 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -1,38 +1,68 @@\n-- sections: \n+- sections:\n - local: index\n title: πŸ€— Accelerate\n- - local: quicktour\n- title: Quick tour\n- - local: installation\n+ - local: basic_tutorials/install\n title: Installation\n- title: Get started\n+ - local: quicktour\n+ title: Quicktour\n+ title: Getting started\n+- sections:\n+ - local: basic_tutorials/overview\n+ title: Overview\n+ - local: basic_tutorials/migration\n+ title: Migrating to πŸ€— Accelerate\n+ - local: basic_tutorials/launch\n+ title: Launching distributed code\n+ - local: basic_tutorials/notebook\n+ title: Launching distributed training from Jupyter Notebooks\n+ title: Tutorials\n - sections:\n- - local: big_modeling\n- title: Handling big models\n- - local: gradient_accumulation\n- title: Gradient accumulation\n- - local: sagemaker\n- title: Amazon SageMaker\n- title: Guides\n+ - local: usage_guides/gradient_accumulation\n+ title: Performing gradient accumulation\n+ - local: usage_guides/fsdp\n+ title: Fully Sharded Data Parallelism\n+ - local: usage_guides/checkpoint\n+ title: Saving and loading training states\n+ - local: usage_guides/deepspeed\n+ title: How to use DeepSpeed\n+ - local: usage_guides/tracking\n+ title: Using experiment trackers\n+ - local: usage_guides/big_modeling\n+ title: How to use large models with small resources\n+ - local: usage_guides/memory\n+ title: How to avoid CUDA Out-of-Memory\n+ - local: usage_guides/sagemaker\n+ title: Using Accelerate on SageMaker\n+ title: How-To Guides\n - sections:\n- - local: accelerator\n- title: Accelerator\n- - local: launcher\n- title: Notebook Launcher\n- - local: kwargs\n- title: Kwargs Handlers\n- - local: checkpoint\n- title: Checkpointing\n- - local: internal\n- title: Internals\n- - local: tracking\n- title: Experiment Tracking\n- - local: fsdp\n- title: Fully Sharded Data Parallel\n- - local: memory\n- title: Memory Utilities\n- - local: deepspeed\n- title: DeepSpeed\n- - 
local: utilities\n- title: General Utilities\n- title: API Reference\n+ - local: concept_guides/gradient_synchronization\n+ title: Gradient synchronization\n+ - local: concept_guides/deferring_execution\n+ title: Executing and deferring jobs\n+ - local: concept_guides/training_tpu\n+ title: TPU best practices\n+ title: Concepts and fundamentals\n+- sections: \n+ - local: package_reference/accelerator\n+ title: Main Accelerator class\n+ - local: package_reference/state\n+ title: Stateful configuration classes\n+ - local: package_reference/cli\n+ title: The Command Line\n+ - local: package_reference/torch_wrappers\n+ title: Torch wrapper classes\n+ - local: package_reference/tracking\n+ title: Experiment trackers\n+ - local: package_reference/launchers\n+ title: Distributed launchers\n+ - local: package_reference/deepspeed\n+ title: DeepSpeed utilities\n+ - local: package_reference/logging\n+ title: Logging\n+ - local: package_reference/big_modeling\n+ title: Working with large models\n+ - local: package_reference/kwargs\n+ title: Kwargs handlers\n+ - local: package_reference/utilities\n+ title: Utility functions and classes\n+ title: \"Reference\"\n\\ No newline at end of file\ndiff --git a/docs/source/accelerator.mdx b/docs/source/accelerator.mdx\ndeleted file mode 100644\nindex 268b09d1f..000000000\n--- a/docs/source/accelerator.mdx\n+++ /dev/null\n@@ -1,65 +0,0 @@\n-<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n-Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n-the License. You may obtain a copy of the License at\n-\n-http://www.apache.org/licenses/LICENSE-2.0\n-\n-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n-an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n-specific language governing permissions and limitations under the License.\n--->\n-\n-# Accelerator\n-\n-The [`Accelerator`] is the main class provided by πŸ€— Accelerate. It serves at the main entrypoint for\n-the API. To quickly adapt your script to work on any kind of setup with πŸ€— Accelerate just:\n-\n-1. Initialize an [`Accelerator`] object (that we will call `accelerator` in the rest of this\n- page) as early as possible in your script.\n-2. Pass along your model(s), optimizer(s), dataloader(s) to the [`~Accelerator.prepare`] method.\n-3. (Optional but best practice) Remove all the `.cuda()` or `.to(device)` in your code and let the\n- `accelerator` handle device placement for you.\n-4. Replace the `loss.backward()` in your code by `accelerator.backward(loss)`.\n-5. (Optional, when using distributed evaluation) Gather your predictions and labels before storing them or using\n- them for metric computation using [`~Accelerator.gather`].\n-\n-This is all that is needed in most cases. 
For more advanced cases or a nicer experience here are the functions you\n-should search for and replace by the corresponding methods of your `accelerator`:\n-\n-- `print` statements should be replaced by [`~Accelerator.print`] to be only printed once per\n- process.\n-- Use [`~Accelerator.is_local_main_process`] for statements that should be executed once per server.\n-- Use [`~Accelerator.is_main_process`] for statements that should be executed once only.\n-- Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing\n- (useful before a model save for instance).\n-- Use [`~Accelerator.unwrap_model`] to unwrap your model before saving it.\n-- Use [`~Accelerator.save`] instead of `torch.save`.\n-- Use [`~Accelerator.clip_grad_norm_`] instead of `torch.nn.utils.clip_grad_norm_` and\n- [`~Accelerator.clip_grad_value_`] instead of `torch.nn.utils.clip_grad_value_`.\n-\n-To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`. \n-This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should\n-actually be performed, and auto-scale the loss:\n-\n-```python\n-accelerator = Accelerator(gradient_accumulation_steps=2)\n-model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)\n-\n-for input, label in training_dataloader:\n- with accelerator.accumulate(model):\n- predictions = model(input)\n- loss = loss_function(predictions, label)\n- accelerator.backward(loss)\n- optimizer.step()\n- scheduler.step()\n- optimizer.zero_grad()\n-```\n-\n-<Tip warning={true}>\n-\n-Using this with `dispatch_batches=True` (which is the default for iterable datasets) is currently not supported.\n-\n-</Tip>\n-\n-[[autodoc]] Accelerator\ndiff --git a/docs/source/basic_tutorials/install.mdx b/docs/source/basic_tutorials/install.mdx\nnew file mode 100644\nindex 000000000..19630f575\n--- /dev/null\n+++ b/docs/source/basic_tutorials/install.mdx\n@@ -0,0 +1,99 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Installation and Configuration\n+\n+Before you start, you will need to setup your environment, install the appropriate packages, and configure πŸ€— Accelerate. πŸ€— Accelerate is tested on **Python 3.7+**.\n+\n+## Installing πŸ€— Accelerate\n+\n+πŸ€— Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:\n+\n+### pip \n+\n+To install πŸ€— Accelerate from pypi, perform:\n+\n+```bash\n+pip install accelerate\n+```\n+\n+### conda\n+\n+πŸ€— Accelerate can also be installed with conda with:\n+\n+```bash\n+conda install -c conda-forge accelerate\n+```\n+\n+### Source\n+\n+New features are added every day that haven't been released yet. 
To try them out yourself, install\n+from the GitHub repository:\n+\n+```bash\n+pip install git+https://github.com/huggingface/accelerate\n+```\n+\n+If you're working on contributing to the library or wish to play with the source code and see live \n+results as you run the code, an editable version can be installed from a locally-cloned version of the \n+repository:\n+\n+```bash\n+git clone https://github.com/huggingface/accelerate\n+cd accelerate\n+pip install -e .\n+```\n+\n+## Configuring πŸ€— Accelerate\n+\n+After installing, you need to configure πŸ€— Accelerate for how the current system is setup for training. \n+To do so run the following and answer the questions prompted to you:\n+\n+```bash\n+accelerate config\n+```\n+\n+To write a barebones configuration that doesn't include options such as DeepSpeed configuration or running on TPUs, you can quickly run:\n+\n+```bash\n+python -c \"from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='fp16')\"\n+```\n+πŸ€— Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode.\n+\n+To check that your configuration looks fine, run:\n+\n+```bash\n+accelerate env\n+```\n+\n+An example output is shown below, which describes two GPUs on a single machine with no mixed precision being used:\n+\n+```bash\n+- `Accelerate` version: 0.11.0.dev0\n+- Platform: Linux-5.10.0-15-cloud-amd64-x86_64-with-debian-11.3\n+- Python version: 3.7.12\n+- Numpy version: 1.19.5\n+- PyTorch version (GPU?): 1.12.0+cu102 (True)\n+- `Accelerate` default config:\n+ - compute_environment: LOCAL_MACHINE\n+ - distributed_type: MULTI_GPU\n+ - mixed_precision: no\n+ - use_cpu: False\n+ - num_processes: 2\n+ - machine_rank: 0\n+ - num_machines: 1\n+ - main_process_ip: None\n+ - main_process_port: None\n+ - main_training_function: main\n+ - deepspeed_config: {}\n+ - fsdp_config: {}\n+```\n\\ No newline at end of file\ndiff --git a/docs/source/basic_tutorials/launch.mdx b/docs/source/basic_tutorials/launch.mdx\nnew file mode 100644\nindex 000000000..741920f26\n--- /dev/null\n+++ b/docs/source/basic_tutorials/launch.mdx\n@@ -0,0 +1,178 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+<Tip warning={true}>\n+\n+ It's recommended you run `accelerate config` before using `accelerate launch` to configure your environment to your liking. \n+ Otherwise πŸ€— Accelerate will use very basic defaults depending on your system setup.\n+\n+</Tip>\n+\n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them are.\n+\n+<Tip>\n+\n+ If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`.\n+\n+</Tip>\n+\n+You can launch your script quickly by using:\n+\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.\n+For example, here is how to use `accelerate launch` with a single GPU:\n+\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+In this case, πŸ€— Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision.\n+Here is how you would use all GPUs and train with mixed precision disabled:\n+\n+```bash\n+accelerate launch --multi_gpu {script_name.py} {--arg1} {--arg2} ...\n+```\n+\n+To get more specific you should pass in the needed parameters yourself. 
For instance, here is how you \n+would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings: \n+\n+```bash\n+accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} {--arg1} {--arg2} ...\n+```\n+\n+For a complete list of parameters you can pass in, run:\n+\n+```bash\n+accelerate launch -h\n+```\n+\n+<Tip>\n+\n+ Even if you are not using πŸ€— Accelerate in your code, you can still use the launcher for starting your scripts!\n+\n+</Tip>\n+\n+For a visualization of this difference, that earlier `accelerate launch` on multi-gpu would look something like so with `torchrun`:\n+\n+```bash\n+MIXED_PRECISION=\"fp16\" torchrun --nproc_per_node=2 --num_machines=1 {script_name.py} {--arg1} {--arg2} ...\n+```\n+\n+## Why you should always use `accelerate config`\n+\n+Why is it useful to the point you should **always** run `accelerate config`? \n+\n+Remember that earlier call to `accelerate launch` as well as `torchrun`?\n+Post configuration, to run that script with the needed parts you just need to use `accelerate launch` outright, without passing anything else in:\n+\n+```bash\n+accelerate launch {script_name.py} {--arg1} {--arg2} ...\n+```\n+\n+\n+## Custom Configurations\n+\n+As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations \n+made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for πŸ€— Accelerate. \n+This cache folder is located at (with decreasing order of priority):\n+\n+- The content of your environment variable `HF_HOME` suffixed with `accelerate`.\n+- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with\n+ `huggingface/accelerate`.\n+- If this does not exist either, the folder `~/.cache/huggingface/accelerate`.\n+\n+To have multiple configurations, the flag `--config_file` can be passed to the `accelerate launch` command paired \n+with the location of the custom yaml. \n+\n+An example yaml may look something like the following for two GPUs on a single machine using `fp16` for mixed precision:\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MULTI_GPU\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+Launching a script from the location of that custom yaml file looks like the following:\n+```bash\n+accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ...\n+```\n\\ No newline at end of file\ndiff --git a/docs/source/basic_tutorials/migration.mdx b/docs/source/basic_tutorials/migration.mdx\nnew file mode 100644\nindex 000000000..ab703c960\n--- /dev/null\n+++ b/docs/source/basic_tutorials/migration.mdx\n@@ -0,0 +1,123 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code to use πŸ€— Accelerate!\n+You'll see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, write out a very basic PyTorch training loop. \n+\n+<Tip>\n+\n+ We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.\n+\n+</Tip>\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss.backward()\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Add in πŸ€— Accelerate\n+\n+To start using πŸ€— Accelerate, first import and create an [`Accelerator`] instance:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!\n+\n+### Setting the right device\n+\n+The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should\n+change the definition of `device` to come from [`Accelerator`]:\n+\n+```diff\n+- device = 'cuda'\n++ device = accelerator.device\n+ model.to(device)\n+```\n+\n+### Preparing your objects\n+\n+Next you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. πŸ€— Accelerate will\n+make sure everything is setup in the current environment for you to start training:\n+\n+```\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+```\n+These objects are returned in the same order they were sent in with. By default when using `device_placement=True`, all of the objects that can be sent to the right device will be.\n+If you need to work with data that isn't passed to [~Accelerator.prepare] but should be on the active device, you should pass in the `device` you made earlier. \n+\n+<Tip warning={true}>\n+\n+ Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`).\n+\n+</Tip>\n+\n+### Modifying the training loop\n+\n+Finally, three lines of code need to be changed in the training loop. 
πŸ€— Accelerate's DataLoader classes will automatically handle the device placement by default,\n+and [`~Accelerator.backward`] should be used for performing the backward pass:\n+\n+```diff\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+- loss.backward()\n++ accelerator.backward(loss)\n+```\n+\n+With that, your training loop is now ready to use πŸ€— Accelerate!\n+\n+## The finished code\n+\n+Below is the final version of the converted code: \n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\ndiff --git a/docs/source/basic_tutorials/notebook.mdx b/docs/source/basic_tutorials/notebook.mdx\nnew file mode 100644\nindex 000000000..903a992d3\n--- /dev/null\n+++ b/docs/source/basic_tutorials/notebook.mdx\n@@ -0,0 +1,429 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching Multi-Node Training from a Jupyter Environment\n+\n+This tutorial teaches you how to fine tune a computer vision model with πŸ€— Accelerate from a Jupyter Notebook on a distributed system.\n+You will also learn how to setup a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training.\n+\n+<Tip>\n+\n+ This tutorial is also available as a Jupyter Notebook [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)\n+\n+</Tip>\n+\n+## Configuring the Environment\n+\n+Before any training can be performed, a πŸ€— Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:\n+\n+```bash\n+accelerate config\n+```\n+\n+However, if general defaults are fine and you are *not* running on a TPU, πŸ€—Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`].\n+\n+The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this. \n+\n+<Tip warning={true}>\n+\n+ CUDA can't be initialized more than once on a multi-node system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train a full cleanup and restart will need to be performed.\n+ \n+</Tip>\n+\n+```python\n+import os\n+from accelerate.utils import write_basic_config\n+\n+write_basic_config() # Write a config file\n+os._exit(00) # Restart the notebook\n+```\n+\n+## Preparing the Dataset and Model\n+\n+Next you should prepare your dataset. 
As mentioned at earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU. \n+\n+If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later. \n+\n+Make sure the dataset is downloaded based on the directions [here](https://github.com/huggingface/accelerate/tree/main/examples#simple-vision-example)\n+\n+```python\n+import os, re, torch, PIL\n+import numpy as np\n+\n+from torch.optim.lr_scheduler import OneCycleLR\n+from torch.utils.data import DataLoader, Dataset\n+from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+from timm import create_model\n+```\n+\n+First you need to create a function to extract the class name based on a filename:\n+\n+```python\n+import os\n+\n+data_dir = \"../../images\"\n+fnames = os.listdir(data_dir)\n+fname = fnames[0]\n+print(fname)\n+```\n+\n+```python out\n+beagle_32.jpg\n+```\n+\n+In the case here, the label is `beagle`. Using regex you can extract the label from the filename:\n+\n+```python\n+import re\n+\n+\n+def extract_label(fname):\n+ stem = fname.split(os.path.sep)[-1]\n+ return re.search(r\"^(.*)_\\d+\\.jpg$\", stem).groups()[0]\n+```\n+\n+```python\n+extract_label(fname)\n+```\n+\n+And you can see it properly returned the right name for our file:\n+\n+```python out\n+\"beagle\"\n+```\n+\n+Next a `Dataset` class should be made to handle grabbing the image and the label:\n+\n+```python\n+class PetsDataset(Dataset):\n+ def __init__(self, file_names, image_transform=None, label_to_id=None):\n+ self.file_names = file_names\n+ self.image_transform = image_transform\n+ self.label_to_id = label_to_id\n+\n+ def __len__(self):\n+ return len(self.file_names)\n+\n+ def __getitem__(self, idx):\n+ fname = self.file_names[idx]\n+ raw_image = PIL.Image.open(fname)\n+ image = raw_image.convert(\"RGB\")\n+ if self.image_transform is not None:\n+ image = self.image_transform(image)\n+ label = extract_label(fname)\n+ if self.label_to_id is not None:\n+ label = self.label_to_id[label]\n+ return {\"image\": image, \"label\": label}\n+```\n+\n+Now to build the dataset. Outside the training function you can find and declare all the filenames and labels and use them as references inside the \n+launched function:\n+\n+```python\n+fnames = [os.path.join(\"../../images\", fname) for fname in fnames if fname.endswith(\".jpg\")]\n+```\n+\n+Next gather all the labels:\n+\n+```python\n+all_labels = [extract_label(fname) for fname in fnames]\n+id_to_label = list(set(all_labels))\n+id_to_label.sort()\n+label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}\n+```\n+\n+Next, you should make a `get_dataloaders` function that will return your built dataloaders for you. As mentioned earlier, if data is automatically \n+sent to the GPU or a TPU device when building your `DataLoaders`, they must be built using this method. 
\n+\n+```python\n+def get_dataloaders(batch_size: int = 64):\n+ \"Builds a set of dataloaders with a batch_size\"\n+ random_perm = np.random.permutation(len(fnames))\n+ cut = int(0.8 * len(fnames))\n+ train_split = random_perm[:cut]\n+ eval_split = random_perm[:cut]\n+\n+ # For training a simple RandomResizedCrop will be used\n+ train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()])\n+ train_dataset = PetsDataset([fnames[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)\n+\n+ # For evaluation a deterministic Resize will be used\n+ eval_tfm = Compose([Resize((224, 224)), ToTensor()])\n+ eval_dataset = PetsDataset([fnames[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)\n+\n+ # Instantiate dataloaders\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size * 2, num_workers=4)\n+ return train_dataloader, eval_dataloader\n+```\n+\n+Finally, you should import the scheduler to be used later:\n+\n+```python\n+from torch.optim.lr_scheduler import CosineAnnealingLR\n+```\n+\n+## Writing the Training Function\n+\n+Now you can build the training loop. [`notebook_launcher`] works by passing in a function to call that will be ran across the distributed system.\n+\n+Here is a basic training loop for the animal classification problem:\n+\n+<Tip>\n+\n+ The code has been split up to allow for explainations on each section. A full version that can be copy and pasted will be available at the end\n+\n+</Tip>\n+\n+\n+```python\n+def training_loop(mixed_precision=\"fp16\", seed: int = 42, batch_size: int = 64):\n+ set_seed(seed)\n+ accelerator = Accelerator(mixed_precision=mixed_precision)\n+```\n+\n+First you should set the seed and create an [`Accelerator`] object as early in the training loop as possible.\n+\n+<Tip warning={true}>\n+\n+ If training on the TPU, your training loop should take in the model as a parameter and it should be instantiated \n+ outside of the training loop function. 
See the [TPU best practices](../concept_guides/training_tpu) \n+ to learn why\n+\n+</Tip>\n+\n+Next you should build your dataloaders and create your model:\n+\n+```python\n+ train_dataloader, eval_dataloader = get_dataloaders(batch_size)\n+ model = create_model(\"resnet50d\", pretrained=True, num_classes=len(label_to_id))\n+```\n+\n+<Tip>\n+\n+ You build the model here so that the seed also controls the new weight initialization\n+\n+</Tip>\n+\n+As you are performing transfer learning in this example, the encoder of the model starts out frozen so the head of the model can be \n+trained only initially:\n+\n+```python\n+ for param in model.parameters():\n+ param.requires_grad = False\n+ for param in model.get_classifier().parameters():\n+ param.requires_grad = True\n+```\n+\n+Normalizing the batches of images will make training a little faster:\n+\n+```python\n+ mean = torch.tensor(model.default_cfg[\"mean\"])[None, :, None, None]\n+ std = torch.tensor(model.default_cfg[\"std\"])[None, :, None, None]\n+```\n+\n+To make these constants available on the active device, you should set it to the Accelerator's device:\n+\n+```python\n+ mean = mean.to(accelerator.device)\n+ std = std.to(accelerator.device)\n+```\n+\n+Next instantiate the rest of the PyTorch classes used for training:\n+\n+```python\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)\n+ lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader))\n+```\n+\n+Before passing everything to [`~Accelerator.prepare`].\n+\n+<Tip>\n+\n+ There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the prepare method.\n+\n+</Tip>\n+\n+```python\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+```\n+\n+Now train the model:\n+\n+```python\n+ for epoch in range(5):\n+ model.train()\n+ for batch in train_dataloader:\n+ inputs = (batch[\"image\"] - mean) / std\n+ outputs = model(inputs)\n+ loss = torch.nn.functional.cross_entropy(outputs, batch[\"label\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+```\n+\n+The evaluation loop will look slightly different compared to the training loop. The number of elements passed as well as the overall \n+total accuracy of each batch will be added to two constants:\n+\n+```python\n+ model.eval()\n+ accurate = 0\n+ num_elems = 0\n+```\n+\n+Next you have the rest of your standard PyTorch loop:\n+\n+```python\n+ for batch in eval_dataloader:\n+ inputs = (batch[\"image\"] - mean) / std\n+ with torch.no_grad():\n+ outputs = model(inputs)\n+ predictions = outputs.argmax(dim=-1)\n+```\n+\n+Before finally the last major difference. 
\n+\n+When performing distributed evaluation, the predictions and labels need to be passed through \n+[`~Accelerator.gather`] so that all of the data is available on the current device and a properly calculated metric can be achieved:\n+\n+```python\n+ accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n+ num_elems += accurate_preds.shape[0]\n+ accurate += accurate_preds.long().sum()\n+```\n+\n+Now you just need to calculate the actual metric for this problem, and you can print it on the main process using [`~Accelerator.print`]:\n+\n+```python\n+ eval_metric = accurate.item() / num_elems\n+ accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n+```\n+\n+A full version of this training loop is available below:\n+\n+```python\n+def training_loop(mixed_precision=\"fp16\", seed: int = 42, batch_size: int = 64):\n+ set_seed(seed)\n+ # Initialize accelerator\n+ accelerator = Accelerator(mixed_precision=mixed_precision)\n+ # Build dataloaders\n+ train_dataloader, eval_dataloader = get_dataloaders(batch_size)\n+\n+ # Instantiate the model (you build the model here so that the seed also controls new weight initaliziations)\n+ model = create_model(\"resnet50d\", pretrained=True, num_classes=len(label_to_id))\n+\n+ # Freeze the base model\n+ for param in model.parameters():\n+ param.requires_grad = False\n+ for param in model.get_classifier().parameters():\n+ param.requires_grad = True\n+\n+ # You can normalize the batches of images to be a bit faster\n+ mean = torch.tensor(model.default_cfg[\"mean\"])[None, :, None, None]\n+ std = torch.tensor(model.default_cfg[\"std\"])[None, :, None, None]\n+\n+ # To make this constant available on the active device, set it to the accelerator device\n+ mean = mean.to(accelerator.device)\n+ std = std.to(accelerator.device)\n+\n+ # Intantiate the optimizer\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)\n+\n+ # Instantiate the learning rate scheduler\n+ lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader))\n+\n+ # Prepare everything\n+ # There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Now you train the model\n+ for epoch in range(5):\n+ model.train()\n+ for batch in train_dataloader:\n+ inputs = (batch[\"image\"] - mean) / std\n+ outputs = model(inputs)\n+ loss = torch.nn.functional.cross_entropy(outputs, batch[\"label\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ model.eval()\n+ accurate = 0\n+ num_elems = 0\n+ for batch in eval_dataloader:\n+ inputs = (batch[\"image\"] - mean) / std\n+ with torch.no_grad():\n+ outputs = model(inputs)\n+ predictions = outputs.argmax(dim=-1)\n+ accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n+ num_elems += accurate_preds.shape[0]\n+ accurate += accurate_preds.long().sum()\n+\n+ eval_metric = accurate.item() / num_elems\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n+```\n+\n+## Using the notebook_launcher\n+\n+All that's left is to use the [`notebook_launcher`].\n+\n+You pass in the function, the arguments (as a tuple), and the number of processes to train on. 
(See the [documentation](../package_reference/launchers) for more information)\n+\n+```python\n+from accelerate import notebook_launcher\n+```\n+\n+```python\n+args = (\"fp16\", 42, 64)\n+notebook_launcher(training_loop, args, num_processes=2)\n+```\n+\n+In the case of running on the TPU, it would look like so:\n+\n+```python\n+model = create_model(\"resnet50d\", pretrained=True, num_classes=len(label_to_id))\n+\n+args = (model, \"fp16\", 42, 64)\n+notebook_launcher(training_loop, args, num_processes=8)\n+```\n+\n+As it's running it will print the progress as well as state how many devices you ran on. This tutorial was ran with two GPUs:\n+\n+```python out\n+Launching training on 2 GPUs.\n+epoch 0: 88.12\n+epoch 1: 91.73\n+epoch 2: 92.58\n+epoch 3: 93.90\n+epoch 4: 94.71\n+```\n+\n+And that's it!\n+\n+## Conclusion\n+\n+This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember:\n+\n+- Make sure to save any code that use CUDA (or CUDA imports) for the function passed to [`notebook_launcher`]\n+- Set the `num_processes` to be the number of devices used for training (such as number of GPUs, CPUs, TPUs, etc)\n+- If using the TPU, declare your model outside the training loop function\n\\ No newline at end of file\ndiff --git a/docs/source/basic_tutorials/overview.mdx b/docs/source/basic_tutorials/overview.mdx\nnew file mode 100644\nindex 000000000..59ff9cbae\n--- /dev/null\n+++ b/docs/source/basic_tutorials/overview.mdx\n@@ -0,0 +1,21 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Overview\n+\n+Welcome to the πŸ€— Accelerate tutorials! These introductory guides will help catch you up to speed on working with πŸ€— Accelerate.\n+You'll learn how to modify your code to have it work with the API seamlessly, how to launch your script properly,\n+and more!\n+\n+These tutorials assume some basic knowledge of Python and familiarity with the PyTorch framework.\n+\n+If you have any questions about πŸ€— Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18).\n\\ No newline at end of file\ndiff --git a/docs/source/concept_guides/deferring_execution.mdx b/docs/source/concept_guides/deferring_execution.mdx\nnew file mode 100644\nindex 000000000..4297e2567\n--- /dev/null\n+++ b/docs/source/concept_guides/deferring_execution.mdx\n@@ -0,0 +1,95 @@\n+# Deferring Executions\n+\n+When you run your usual script, instructions are executed in order. Using πŸ€— Accelerate to deploy your script on several\n+GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be\n+faster than others.\n+\n+You might need to wait for all processes to have reached a certain point before executing a given instruction. 
For\n+instance, you shouldn't save a model before being sure every process is done with training, and you wouldn't want to \n+continue training before all the model weights have been loaded in. To do this, just write the following line in your code:\n+\n+```\n+accelerator.wait_for_everyone()\n+```\n+\n+This instruction will block all the processes that arrive first until all the other processes have reached that\n+point (if you run your script on just one GPU or CPU, this won't do anything).\n+\n+A few example cases for when to use this utility are listed below:\n+\n+<Tip>\n+\n+ Some of these are utilized with the [`~Accelerator.main_process_first`] context manager, which utilizes [`~Accelerator.wait_for_everyone`] to \n+ run a particular set of code on the main process beforehand before triggering and launching the other processes\n+\n+</Tip>\n+\n+## Downloading a Dataset \n+\n+When downloading a dataset, you should download it first on the main process and then loading the cached dataset in afterwards\n+\n+<Tip>\n+\n+ `load_dataset` will perform a lock under the hood to stop multiple downloads from happening at once, but if you are downloading something \n+ not using this library you should use this method.\n+ \n+</Tip>\n+\n+```python\n+with accelerator.main_process_first():\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+```\n+\n+Under the hood this is the same as calling: \n+\n+```python\n+# First do something on the main process\n+if accelerator.is_main_process:\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+else:\n+ accelerator.wait_for_everyone()\n+\n+# And then send it to the rest of them\n+if not accelerator.is_main_process:\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+else:\n+ accelerator.wait_for_everyone()\n+```\n+\n+## Saving the `state_dict`\n+\n+When saving the `state_dict` of the model, since you would normally save one file on just the main process\n+you should specify that:\n+\n+```python\n+if accelerator.is_main_process:\n+ model = accelerator.unwrap_model(model)\n+ torch.save(model.state_dict(), \"weights.pth\")\n+```\n+\n+## Loading in the `state_dict`\n+\n+When loading in the `state_dict` to a model, optimizer, or scheduler, you should wait \n+for all workers to have the weights loaded in before moving on to training\n+\n+```python\n+with accelerator.main_process_first():\n+ state = torch.load(\"weights.pth\")\n+ model.load_state_dict(state)\n+```\n+\n+## Applying a multi-worker CPU operation \n+\n+Applying a `map()` operation on multiple workers, such as tokenizing should be done on the \n+main process first, and then propagated to each one. \n+\n+```python\n+datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+with accelerator.main_process_first():\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+```\n\\ No newline at end of file\ndiff --git a/docs/source/concept_guides/gradient_synchronization.mdx b/docs/source/concept_guides/gradient_synchronization.mdx\nnew file mode 100644\nindex 000000000..02b5adf0d\n--- /dev/null\n+++ b/docs/source/concept_guides/gradient_synchronization.mdx\n@@ -0,0 +1,117 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. \n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+ import torch.nn as nn\n+- from torch.nn.parallel import DistributedDataParallel\n+\n+ model = nn.Linear(10,10)\n++ model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. But how does this risk slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`. All instances of the model\n+need to have updated their gradients, collated, and updated again before moving onto the next batch of data. But when performing \n+gradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This \n+can cause a significant slowdown since all the processes need to communicate with them more times than needed. How \n+can you avoid this overhead?\n+\n+## Solving the slowdown problem\n+\n+Since you are skipping these batches, their gradients do not need to be synchronized until the point where `.backward()` is actually called. \n+PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager\n+that is added to your model after converting it to DDP.\n+\n+Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this \n+context manager will trigger the synchronization. 
See an example below:\n+```python\n+ddp_model, dataloader = accelerator.prepare(model, dataloader)\n+\n+for index, batch in enumerate(dataloader):\n+ inputs, targets = batch\n+ # Trigger gradient synchronization on the last batch\n+ if index != (len(dataloader) - 1):\n+ with ddp_model.no_sync():\n+ # Gradients only accumulate\n+ outputs = ddp_model(inputs)\n+ loss = loss_func(outputs)\n+ accelerator.backward(loss)\n+ else:\n+ # Gradients finally sync\n+ outputs = ddp_model(inputs)\n+ loss = loss_func(outputs)\n+ accelerator.backward(loss)\n+```\n+\n+In πŸ€— Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!),\n+`ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way:\n+\n+```diff\n+ ddp_model, dataloader = accelerator.prepare(model, dataloader)\n+\n+ for index, batch in enumerate(dataloader):\n+ inputs, targets = batch\n+ # Trigger gradient synchronization on the last batch\n+ if index != (len(dataloader)-1):\n+- with ddp_model.no_sync():\n++ with accelerator.no_sync(model):\n+ # Gradients only accumulate\n+ outputs = ddp_model(inputs)\n+ loss = loss_func(outputs, targets)\n+ accelerator.backward(loss)\n+ else:\n+ # Gradients finally sync\n+ outputs = ddp_model(inputs)\n+ loss = loss_func(outputs)\n+ accelerator.backward(loss)\n+```\n+\n+As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the current batch number, leaving you with the final\n+gradient accumulation API:\n+\n+```python\n+ddp_model, dataloader = accelerator.prepare(model, dataloader)\n+\n+for batch in dataloader:\n+ with accelerator.accumulate(model):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+```\n\\ No newline at end of file\ndiff --git a/docs/source/concept_guides/training_tpu.mdx b/docs/source/concept_guides/training_tpu.mdx\nnew file mode 100644\nindex 000000000..32736ce31\n--- /dev/null\n+++ b/docs/source/concept_guides/training_tpu.mdx\n@@ -0,0 +1,164 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Training on TPUs with πŸ€— Accelerate\n+\n+Training on TPUs can be slightly different than training on multi-gpu, even with πŸ€— Accelerate. This guide aims to show you \n+where you should be careful and why, as well as the best practices in general.\n+\n+## Training in a Notebook\n+\n+The main carepoint when training on TPUs comes from the [`notebook_launcher`]. 
As mentioned in the [notebook tutorial](../usage_guides/notebook), you need to \n+restructure your training code into a function that can get passed to the [`notebook_launcher`] function and be careful about not declaring any tensors on the GPU.\n+\n+While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**. \n+When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already \n+utilizing a python process, you need to *fork* a new process from it to launch your code. \n+\n+Where this becomes important is in regards to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your \n+training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead one \n+model instance is shared between all the nodes and it is passed back and forth. This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or\n+on Google Colaboratory. \n+\n+Below is an example of a training function passed to the [`notebook_launcher`] if training on CPUs or GPUs:\n+\n+<Tip>\n+\n+ This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight \n+ modifications for the sake of simplicity\n+\n+</Tip>\n+\n+```python\n+def training_function():\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+ train_dataloader, eval_dataloader = create_dataloaders(\n+ train_batch_size=hyperparameters[\"train_batch_size\"], eval_batch_size=hyperparameters[\"eval_batch_size\"]\n+ )\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=hyperparameters[\"learning_rate\"])\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader\n+ )\n+\n+ num_epochs = hyperparameters[\"num_epochs\"]\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ accelerator.backward(loss)\n+\n+ optimizer.step()\n+ optimizer.zero_grad()\n+```\n+\n+```python\n+from accelerate import notebook_launcher\n+\n+notebook_launcher(training_function)\n+```\n+\n+<Tip>\n+\n+ The `notebook_launcher` will default to 8 processes if πŸ€— Accelerate has been configured for a TPU\n+\n+</Tip>\n+\n+If you use this example and declare the model *inside* the training loop, then on a low-resource system you will potentially see an error \n+like:\n+\n+```\n+ProcessExitedException: process 0 terminated with signal SIGSEGV\n+```\n+\n+This error is *extremely* cryptic but the basic explaination is you ran out of system RAM. 
You can avoid this entirely by reconfiguring the training function to \n+accept a single `model` argument, and declare it in an outside cell:\n+\n+```python\n+# In another Jupyter cell\n+model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+```\n+\n+```diff\n++ def training_function(model):\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+- model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+ train_dataloader, eval_dataloader = create_dataloaders(\n+ train_batch_size=hyperparameters[\"train_batch_size\"], eval_batch_size=hyperparameters[\"eval_batch_size\"]\n+ )\n+ ...\n+```\n+\n+And finally calling the training function with:\n+\n+```diff\n+ from accelerate import notebook_launcher\n+- notebook_launcher(training_function)\n++ notebook_launcher(training_function, (model,))\n+```\n+\n+<Tip>\n+\n+ The above workaround is only needed when launching a TPU instance from a Jupyter Notebook on a low-resource server such as Google Colaboratory or Kaggle. If \n+ using a script or launching on a much beefier server declaring the model beforehand is not needed.\n+\n+</Tip>\n+\n+## Mixed Precision and Global Variables \n+\n+As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), πŸ€— Accelerate supports fp16 and bf16, both of which can be used on TPUs.\n+That being said, ideally `bf16` should be utilized as it is extremely efficient to use.\n+\n+There are two \"layers\" when using `bf16` and πŸ€— Accelerate on TPUs, at the base level and at the operation level. \n+\n+At the base level, this is enabled when passing `mixed_precision=\"bf16\"` to `Accelerator`, such as:\n+```python\n+accelerator = Accelerator(mixed_precision=\"bf16\")\n+```\n+By default this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs. \n+The specific configuration being set is an environmental variable of `XLA_USE_BF16` is set to `1`.\n+\n+There is a futher configuration you can perform which is setting the `XLA_DOWNCAST_BF16` environmental variable. If set to `1`, then \n+`torch.float` is `bfloat16` and `torch.double` is `float32`.\n+\n+This is performed in the `Accelerator` object when passing `downcast_bf16=True`:\n+```python\n+accelerator = Accelerator(mixed_precision=\"bf16\", downcast_bf16=True)\n+```\n+\n+Using downcasting instead of bf16 everywhere is good for when you are trying to calculate metrics, log values, and more where raw bf16 tensors would be unusable. \n+\n+## Training Times on TPUs\n+\n+As you launch your script, you may notice that training seems exceptionally slow at first. This is because TPUs\n+first run through a few batches of data to see how much memory to allocate before finally utilizing this configured \n+memory allocation extremely efficiently. \n+\n+If you notice that your evaluation code to calculate the metrics of your model takes longer due to a larger batch size being used, \n+it is recommended to keep the batch size the same as the training data if it is too slow. Otherwise the memory will reallocate to this \n+new batch size after the first few iterations. 
\n+\n+<Tip>\n+\n+ Just because the memory is allocated does not mean it will be used or that the batch size will increase when going back to your training dataloader.\n+\n+</Tip>\n\\ No newline at end of file\ndiff --git a/docs/source/index.mdx b/docs/source/index.mdx\nindex 16593d554..3f0f58802 100644\n--- a/docs/source/index.mdx\n+++ b/docs/source/index.mdx\n@@ -1,4 +1,4 @@\n-<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n \n Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n the License. You may obtain a copy of the License at\n@@ -12,121 +12,60 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n-\n-## Features\n-\n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and in any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run\n- seamlessly on your local machine for debugging or your training environment.\n-\n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment and\n- then launch the scripts.\n-\n-\n-## Easy to integrate\n-\n-A traditional training loop in PyTorch looks like this:\n-\n-```python\n-my_model.to(device)\n-\n-for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- loss.backward()\n- my_optimizer.step()\n-```\n-\n-Changing it to work with accelerate is really easy and only adds a few lines of code:\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! 
In short, training and inference at scale made simple, efficient and adaptable.\n \n ```diff\n + from accelerate import Accelerator\n-\n + accelerator = Accelerator()\n- # Use the device given by the *accelerator* object.\n-+ device = accelerator.device\n- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n+\n++ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n + )\n \n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n inputs, targets = batch\n inputs = inputs.to(device)\n targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n + accelerator.backward(loss)\n- my_optimizer.step()\n+ optimizer.step()\n+ scheduler.step()\n ```\n \n-and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n+Built on `torch_xla` and `torch.distributed`, πŸ€— Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms.\n+Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training! \n \n-You can even simplify your script a bit by letting πŸ€— Accelerate handle the device placement for you (which is safer,\n-especially for TPU training):\n+<Tip> \n \n-```diff\n-+ from accelerate import Accelerator\n+ To get a better idea of this process, make sure to check out the [Tutorials](basic_tutorials/overview)! \n \n-+ accelerator = Accelerator()\n-- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n-+ )\n+</Tip>\n \n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n-- inputs = inputs.to(device)\n-- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n-```\n-\n-## Script launcher\n-\n-No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training! πŸ€—\n-Accelerate comes with a CLI tool that will make your life easier when launching distributed scripts.\n-\n-On your machine(s) just run:\n \n+This code can then be launched on any system through Accelerate's CLI interface:\n ```bash\n-accelerate config\n+accelerate launch {my_script.py}\n ```\n \n-and answer the questions asked. 
This will generate a config file that will be used automatically to properly set the\n-default options when doing\n-\n-```bash\n-accelerate launch my_script.py --args_to_my_script\n-```\n-\n-For instance, here is how you would run the NLP example (from the root of the repo):\n-\n-```bash\n-accelerate launch examples/nlp_example.py\n-```\n-\n-## Supported integrations\n-\n-- CPU only\n-- single GPU\n-- multi-GPU on one node (machine)\n-- multi-GPU on several nodes (machines)\n-- TPU\n-- FP16 with native AMP (apex on the roadmap)\n-- DeepSpeed (experimental support)\n+<div class=\"mt-10\">\n+ <div class=\"w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5\">\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/basic_tutorials/overview\"\n+ ><div class=\"w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">Tutorials</div>\n+ <p class=\"text-gray-700\">Learn the basics and become familiar with using πŸ€— Accelerate. Start here if you are using πŸ€— Accelerate for the first time!</p>\n+ </a>\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/utility_guides/gradient_accumulation\"\n+ ><div class=\"w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">How-to guides</div>\n+ <p class=\"text-gray-700\">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use πŸ€— Accelerate to solve real-world problems.</p>\n+ </a>\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/concept_guides/gradient_synchronization\"\n+ ><div class=\"w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">Conceptual guides</div>\n+ <p class=\"text-gray-700\">High-level explanations for building a better understanding of important topics such as avoiding subtle nuances and pitfalls in distributed training and DeepSpeed.</p>\n+ </a>\n+ <a class=\"!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg\" href=\"/docs/accelerate/package_reference/accelerator\"\n+ ><div class=\"w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed\">Reference</div>\n+ <p class=\"text-gray-700\">Technical descriptions of how πŸ€— Accelerate classes and methods work.</p>\n+ </a>\n+ </div>\n+</div>\n\\ No newline at end of file\ndiff --git a/docs/source/installation.mdx b/docs/source/installation.mdx\ndeleted file mode 100644\nindex 26c2698aa..000000000\n--- a/docs/source/installation.mdx\n+++ /dev/null\n@@ -1,96 +0,0 @@\n-<!---\n-Copyright 2021 The HuggingFace Team. 
All rights reserved.\n-\n-Licensed under the Apache License, Version 2.0 (the \"License\");\n-you may not use this file except in compliance with the License.\n-You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n-Unless required by applicable law or agreed to in writing, software\n-distributed under the License is distributed on an \"AS IS\" BASIS,\n-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-See the License for the specific language governing permissions and\n-limitations under the License.\n--->\n-\n-# Installation\n-\n-πŸ€— Accelerate is tested on Python 3.6+, and PyTorch 1.6.0+.\n-\n-You should install πŸ€— Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're\n-unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going\n-to use and activate it.\n-\n-Now, if you want to use πŸ€— Accelerate, you can install it with pip.\n-\n-## Installation with pip\n-\n-First you need to install PyTorch. Please refer to the\n-[PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform.\n-\n-When PyTorch has been installed, πŸ€— Accelerate can be installed using pip as follows:\n-\n-```bash\n-pip install accelerate\n-```\n-\n-Alternatively, for CPU-support only, you can install πŸ€— Accelerate and PyTorch in one line with:\n-\n-```bash\n-pip install accelerate[torch]\n-```\n-\n-To check πŸ€— Accelerate is properly installed, run the following command:\n-\n-```bash\n-python -c \"TODO write\"\n-```\n-\n-## Installing from source\n-\n-Here is how to quickly install `accelerate` from source:\n-\n-```bash\n-pip install git+https://github.com/huggingface/accelerate\n-```\n-\n-Note that this will install not the latest released version, but the bleeding edge `main` version, which you may want to use in case a bug has been fixed since the last official release and a new release hasn't been yet rolled out.\n-\n-While we strive to keep `main` operational at all times, if you notice some issues, they usually get fixed within a few hours or a day and you're more than welcome to help us detect any problems by opening an [Issue](https://github.com/huggingface/accelerate/issues) and this way, things will get fixed even sooner.\n-\n-Again, you can run:\n-\n-```bash\n-python -c \"TODO write\"\n-```\n-\n-to check πŸ€— Accelerate is properly installed.\n-\n-## Editable install\n-\n-If you want to constantly use the bleeding edge `main` version of the source code, or if you want to contribute to the library and need to test the changes in the code you're making, you will need an editable install. This is done by cloning the repository and installing with the following commands:\n-\n-``` bash\n-git clone https://github.com/huggingface/accelerate.git\n-cd accelerate\n-pip install -e .\n-```\n-\n-This command performs a magical link between the folder you cloned the repository to and your python library paths, and it'll look inside this folder in addition to the normal library-wide paths. So if normally your python packages get installed into:\n-```\n-~/anaconda3/envs/main/lib/python3.7/site-packages/\n-```\n-now this editable install will reside where you clone the folder to, e.g. 
`~/accelerate/` and python will search it too.\n-\n-Do note that you have to keep that `accelerate` folder around and not delete it to continue using the πŸ€— Accelerate library.\n-\n-Now, let's get to the real benefit of this installation approach. Say, you saw some new feature just has been committed into `main`. If you have already performed all the steps above, to update your accelerate repo to include all the latest commits, all you need to do is to `cd` into that cloned repository folder and update the clone to the latest version:\n-\n-```bash\n-cd ~/accelerate/\n-git pull\n-```\n-\n-There is nothing else to do. Your python environment will find the bleeding edge version of πŸ€— Accelerate on the next run.\n-\ndiff --git a/docs/source/launcher.mdx b/docs/source/launcher.mdx\ndeleted file mode 100644\nindex fb672d5ad..000000000\n--- a/docs/source/launcher.mdx\n+++ /dev/null\n@@ -1,28 +0,0 @@\n-<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n-Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n-the License. You may obtain a copy of the License at\n-\n-http://www.apache.org/licenses/LICENSE-2.0\n-\n-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n-an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n-specific language governing permissions and limitations under the License.\n--->\n-\n-# Notebook Launcher\n-\n-Launch your training function inside a notebook. Currently supports launching a training with TPUs on [Google\n-Colab](https://colab.research.google.com/) and [Kaggle kernels](https://www.kaggle.com/code), as well as training on\n-several GPUs (if the machine on which you are running your notebook has them).\n-\n-An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb).\n-\n-<Tip warning={true}>\n-\n-Your `Accelerator` object should only be defined inside the training function. This is because the\n-initialization should be done inside the launcher only.\n-\n-</Tip>\n-\n-[[autodoc]] notebook_launcher\ndiff --git a/docs/source/package_reference/accelerator.mdx b/docs/source/package_reference/accelerator.mdx\nnew file mode 100644\nindex 000000000..fb20f1a66\n--- /dev/null\n+++ b/docs/source/package_reference/accelerator.mdx\n@@ -0,0 +1,163 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerator\n+\n+The [`Accelerator`] is the main class provided by πŸ€— Accelerate. \n+It serves at the main entrypoint for the API. \n+\n+## Quick adaptation of your code\n+\n+To quickly adapt your script to work on any kind of setup with πŸ€— Accelerate just:\n+\n+1. Initialize an [`Accelerator`] object (that we will call `accelerator` throughout this page) as early as possible in your script.\n+2. 
Pass your dataloader(s), model(s), optimizer(s), and scheduler(s) to the [`~Accelerator.prepare`] method.\n+3. Remove all the `.cuda()` or `.to(device)` from your code and let the `accelerator` handle the device placement for you. \n+\n+<Tip>\n+\n+ Step three is optional, but considered a best practice.\n+\n+</Tip>\n+\n+4. Replace `loss.backward()` in your code with `accelerator.backward(loss)`.\n+5. Gather your predictions and labels before storing them or using them for metric computation using [`~Accelerator.gather`].\n+\n+<Tip warning={true}>\n+\n+ Step five is mandatory when using distributed evaluation.\n+ \n+</Tip>\n+\n+In most cases this is all that is needed. The next section lists a few more advanced use cases and nice features\n+you should search for and replace with the corresponding methods of your `accelerator`:\n+\n+## Advanced recommendations\n+\n+### Printing\n+\n+`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process.\n+\n+```diff\n+- print(\"My thing I want to print!\")\n++ accelerator.print(\"My thing I want to print!\")\n+```\n+\n+### Executing processes\n+\n+#### Once on a single server\n+\n+For statements that should be executed once per server, use [`~Accelerator.is_local_main_process`]:\n+\n+```python\n+if accelerator.is_local_main_process:\n+ do_thing_once_per_server()\n+```\n+\n+A function can be wrapped using the [`~Accelerator.on_local_main_process`] decorator to achieve the same \n+behavior on a function's execution:\n+\n+```python\n+@accelerator.on_local_main_process\n+def do_my_thing():\n+ \"Something done once per server\"\n+ do_thing_once_per_server()\n+```\n+\n+#### Only ever once across all servers\n+\n+For statements that should only ever be executed once, use [`~Accelerator.is_main_process`]:\n+\n+```python\n+if accelerator.is_main_process:\n+ do_thing_once()\n+```\n+\n+A function can be wrapped using the [`~Accelerator.on_main_process`] decorator to achieve the same \n+behavior on a function's execution:\n+\n+```python\n+@accelerator.on_main_process\n+def do_my_thing():\n+ \"Something done once across all servers\"\n+ do_thing_once()\n+```\n+\n+#### On specific processes\n+\n+If a function should be run on a specific overall or local process index, there are similar decorators \n+to achieve this:\n+\n+```python\n+@accelerator.on_local_process(local_process_idx=0)\n+def do_my_thing():\n+ \"Something done on process index 0 on each server\"\n+ do_thing_on_index_zero_on_each_server()\n+```\n+\n+```python\n+@accelerator.on_process(process_index=0)\n+def do_my_thing():\n+ \"Something done on process index 0\"\n+ do_thing_on_index_zero()\n+```\n+\n+### Synchronicity control\n+\n+Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing. (Useful before a model save for instance)\n+\n+### Saving and loading\n+\n+Use [`~Accelerator.unwrap_model`] before saving to remove all special model wrappers added during the distributed process. 
\n+\n+```python\n+model = MyModel()\n+model = accelerator.prepare(model)\n+# Unwrap\n+model = accelerator.unwrap_model(model)\n+```\n+\n+Use [`~Accelerator.save`] instead of `torch.save`:\n+\n+```diff\n+ state_dict = model.state_dict()\n+- torch.save(state_dict, \"my_state.pkl\")\n++ accelerator.save(state_dict, \"my_state.pkl\")\n+```\n+\n+### Operations\n+\n+Use [`~Accelerator.clip_grad_norm_`] instead of ``torch.nn.utils.clip_grad_norm_`` and [`~Accelerator.clip_grad_value_`] instead of ``torch.nn.utils.clip_grad_value``\n+\n+### Gradient Accumulation\n+\n+To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a gradient_accumulation_steps. \n+This will also automatically ensure the gradients are synced or unsynced when on \n+multi-device training, check if the step should actually be performed, and auto-scale the loss:\n+\n+```diff\n+- accelerator = Accelerator()\n++ accelerator = Accelerator(gradient_accumulation_steps=2)\n+\n+ for (input, label) in training_dataloader:\n++ with accelerator.accumulate(model):\n+ predictions = model(input)\n+ loss = loss_function(predictions, labels)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+ optimizer.zero_grad()\n+```\n+\n+## Overall API documentation:\n+\n+[[autodoc]] Accelerator\n\\ No newline at end of file\ndiff --git a/docs/source/package_reference/big_modeling.mdx b/docs/source/package_reference/big_modeling.mdx\nnew file mode 100644\nindex 000000000..e54ac0807\n--- /dev/null\n+++ b/docs/source/package_reference/big_modeling.mdx\n@@ -0,0 +1,41 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Working with large models\n+\n+## Dispatching and Offloading Models\n+\n+[[autodoc]] big_modeling.init_empty_weights\n+[[autodoc]] big_modeling.cpu_offload\n+[[autodoc]] big_modeling.disk_offload\n+[[autodoc]] big_modeling.dispatch_model\n+[[autodoc]] big_modeling.load_checkpoint_and_dispatch\n+\n+## Model Hooks\n+\n+### Hook Classes\n+\n+[[autodoc]] hooks.ModelHook\n+[[autodoc]] hooks.AlignDevicesHook\n+[[autodoc]] hooks.SequentialHook\n+\n+### Adding Hooks\n+\n+[[autodoc]] hooks.add_hook_to_module\n+[[autodoc]] hooks.attach_execution_device_hook\n+[[autodoc]] hooks.attach_align_device_hook\n+[[autodoc]] hooks.attach_align_device_hook_on_blocks\n+\n+### Removing Hooks\n+\n+[[autodoc]] hooks.remove_hook_from_module\n+[[autodoc]] hooks.remove_hook_from_submodules\n\\ No newline at end of file\ndiff --git a/docs/source/package_reference/cli.mdx b/docs/source/package_reference/cli.mdx\nnew file mode 100644\nindex 000000000..092b72850\n--- /dev/null\n+++ b/docs/source/package_reference/cli.mdx\n@@ -0,0 +1,152 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# The Command Line \n+\n+Below is a list of all the available commands in πŸ€— Accelerate with their parameters.\n+\n+## accelerate config\n+\n+**Command**:\n+\n+`accelerate config` or `accelerate-config`\n+\n+Launches a series of prompts to create and save a `default_config.yml` configuration file for your training system. Should \n+always be run first on your machine.\n+\n+**Usage**: \n+\n+```bash\n+accelerate config [arguments]\n+```\n+\n+**Optional Arguments**:\n+* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content\n+ of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\n+\n+## accelerate env\n+\n+**Command**:\n+\n+`accelerate env` or `accelerate-env`\n+\n+Lists the contents of the passed πŸ€— Accelerate configuration file. Should always be used when opening an issue on the [GitHub repository](https://github.com/huggingface/accelerate).\n+\n+**Usage**:\n+\n+```bash\n+accelerate env [arguments]\n+```\n+\n+**Optional Arguments**:\n+* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content\n+ of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\n+\n+## accelerate launch\n+\n+**Command**:\n+\n+`accelerate launch` or `accelerate-launch`\n+\n+Launches a specified script on a distributed system with the right parameters.\n+\n+**Usage**: \n+\n+```bash\n+accelerate launch [arguments] {training_script} --{training_script-argument-1} --{training_script-argument-2} ...\n+```\n+\n+**Positional Arguments**:\n+\n+- `{training_script}` -- The full path to the script to be launched in parallel\n+- `--{training_script-argument-1}` -- Arguments of the training script\n+\n+**Optional Arguments**:\n+\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\n+* `--config_file CONFIG_FILE` (`str`) -- The config file to use for the default values in the launching script.\n+* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.\n+* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on\n+ Nvidia Ampere GPUs and PyTorch 1.10 or later.\n+* `--multi_gpu` (`bool`, defaults to `False`) -- Whether or not this should launch a distributed GPU training.\n+* `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.\n+* `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. 
Useful when the script is not a Python script.\n+\n+The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their \n+values. They can also be passed in manually.\n+\n+**Machine Configuration Arguments**:\n+\n+The following arguments are useful for customization of worker machines:\n+* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.\n+* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.\n+* `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.\n+* `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.\n+* `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.\n+* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.\n+\n+\n+**DeepSpeed Arguments**:\n+\n+The following arguments are only useful when `use_deepspeed` is passed: \n+* `--use_deepspeed` (`bool`) -- Whether to use deepspeed.\n+* `--deepspeed_config_file DEEPSPEED_CONFIG_FILE` (`str`) -- DeepSpeed config file.\n+* `--zero_stage ZERO_STAGE` (`str`) -- DeepSpeed's ZeRO optimization stage\n+* `--offload_optimizer_device OFFLOAD_OPTIMIZER_DEVICE` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states\n+* `--offload_param_device OFFLOAD_PARAM_DEVICE` (`str`) -- Decides where (none|cpu|nvme) to offload parameters\n+* `--gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS` (`int`) -- Number of gradient_accumulation_steps used in your training script\n+* `--gradient_clipping GRADIENT_CLIPPING` (`float`) -- gradient clipping value used in your training script\n+The following arguments are related to using ZeRO Stage-3:\n+* `--zero3_init_flag ZERO3_INIT_FLAG` (`bool`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models\n+* `--zero3_save_16bit_model ZERO3_SAVE_16BIT_MODEL` (`bool`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3\n+\n+**Fully Sharded Data Parallelism Arguments**:\n+\n+The following arguments are only useful when `use_fsdp` is passed:\n+* `--use_fsdp` (`bool`) -- Whether to use fsdp.\n+* `--offload_params OFFLOAD_PARAMS` (`bool`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.\n+* `--min_num_params MIN_NUM_PARAMS` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.\n+* `--sharding_strategy SHARDING_STRATEGY` (`str`) -- FSDP's Sharding Strategy.\n+\n+**TPU Arguments**:\n+\n+The following arguments are only useful when `tpu` is passed:\n+* `--tpu` (`bool`) - Whether or not this should launch a TPU training.\n+* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.\n+\n+**AWS SageMaker Arguments**:\n+\n+The following arguments are only useful when training in SageMaker:\n+* `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\n+* `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job\n+\n+## accelerate test\n+\n+`accelerate test` or `accelerate-test`\n+\n+Runs `accelerate/test_utils/test_script.py` to verify that πŸ€— Accelerate has been properly configured on your 
system and runs. \n+\n+**Usage**: \n+\n+```bash\n+accelerate test [arguments]\n+```\n+\n+**Optional Arguments**:\n+* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content\n+ of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory\n+ (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.\n+* `-h`, `--help` (`bool`) -- Show a help message and exit\ndiff --git a/docs/source/package_reference/deepspeed.mdx b/docs/source/package_reference/deepspeed.mdx\nnew file mode 100644\nindex 000000000..fee886f4e\n--- /dev/null\n+++ b/docs/source/package_reference/deepspeed.mdx\n@@ -0,0 +1,25 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Utilities for DeepSpeed\n+\n+[[autodoc]] utils.DeepSpeedPlugin\n+\n+[[autodoc]] utils.DummyOptim\n+\n+[[autodoc]] utils.DummyScheduler\n+\n+[[autodoc]] utils.DeepSpeedEngineWrapper\n+\n+[[autodoc]] utils.DeepSpeedOptimizerWrapper\n+\n+[[autodoc]] utils.DeepSpeedSchedulerWrapper\ndiff --git a/docs/source/kwargs.mdx b/docs/source/package_reference/kwargs.mdx\nsimilarity index 100%\nrename from docs/source/kwargs.mdx\nrename to docs/source/package_reference/kwargs.mdx\ndiff --git a/docs/source/package_reference/launchers.mdx b/docs/source/package_reference/launchers.mdx\nnew file mode 100644\nindex 000000000..6f37f0af0\n--- /dev/null\n+++ b/docs/source/package_reference/launchers.mdx\n@@ -0,0 +1,19 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launchers\n+\n+Functions for launching training on distributed processes.\n+\n+\n+[[autodoc]] accelerate.notebook_launcher\n+[[autodoc]] accelerate.debug_launcher\n\\ No newline at end of file\ndiff --git a/docs/source/package_reference/logging.mdx b/docs/source/package_reference/logging.mdx\nnew file mode 100644\nindex 000000000..675af41ee\n--- /dev/null\n+++ b/docs/source/package_reference/logging.mdx\n@@ -0,0 +1,24 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Logging with Accelerate\n+\n+Accelerate has its own logging utility to handle logging while in a distributed system.\n+To utilize this replace cases of `logging` with `accelerate.logging`:\n+```diff\n+- import logging\n++ from accelerate.logging import get_logger\n+- logger = logging.getLogger(__name__)\n++ logger = get_logger(__name__)\n+```\n+\n+[[autodoc]] logging.get_logger\n\\ No newline at end of file\ndiff --git a/docs/source/package_reference/state.mdx b/docs/source/package_reference/state.mdx\nnew file mode 100644\nindex 000000000..f1f5ef974\n--- /dev/null\n+++ b/docs/source/package_reference/state.mdx\n@@ -0,0 +1,23 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Stateful Classes\n+\n+Below are variations of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all\n+instances share the same state, which is initialized on the first instantiation.\n+\n+These classes are immutable and store information about certain configurations or \n+states.\n+\n+[[autodoc]] state.AcceleratorState\n+\n+[[autodoc]] state.GradientState\n\\ No newline at end of file\ndiff --git a/docs/source/internal.mdx b/docs/source/package_reference/torch_wrappers.mdx\nsimilarity index 64%\nrename from docs/source/internal.mdx\nrename to docs/source/package_reference/torch_wrappers.mdx\nindex 4ff1d6ff9..4ac8ae572 100644\n--- a/docs/source/internal.mdx\n+++ b/docs/source/package_reference/torch_wrappers.mdx\n@@ -10,44 +10,24 @@ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o\n specific language governing permissions and limitations under the License.\n -->\n \n-# Internals\n+# Wrapper classes for torch Dataloaders, Optimizers, and Schedulers\n \n-## Gradient Accumulation states\n+The internal classes Accelerate uses to prepare objects for distributed training\n+when calling [`~Accelerator.prepare`].\n \n-[[autodoc]] state.GradientState\n-\n-## Optimizer\n-\n-[[autodoc]] optimizer.AcceleratedOptimizer\n-\n-## DataLoader\n-\n-The main work on your PyTorch `DataLoader` is done by the following function:\n+## Datasets and DataLoaders\n \n [[autodoc]] data_loader.prepare_data_loader\n \n-### DataLoaderShard\n-\n-[[autodoc]] data_loader.DataLoaderShard\n-\n-### BatchSamplerShard\n-\n [[autodoc]] data_loader.BatchSamplerShard\n-\n-### IterableDatasetShard\n-\n [[autodoc]] data_loader.IterableDatasetShard\n+[[autodoc]] data_loader.DataLoaderShard\n+[[autodoc]] data_loader.DataLoaderDispatcher\n \n-## Scheduler\n-\n-[[autodoc]] scheduler.AcceleratedScheduler\n-\n-## Distributed 
Config\n-\n-### AcceleratorState\n+## Optimizers \n \n-[[autodoc]] state.AcceleratorState\n+[[autodoc]] optimizer.AcceleratedOptimizer\n \n-## Tracking\n+## Schedulers \n \n-[[autodoc]] tracking.GeneralTracker\n+[[autodoc]] scheduler.AcceleratedScheduler\n\\ No newline at end of file\ndiff --git a/docs/source/package_reference/tracking.mdx b/docs/source/package_reference/tracking.mdx\nnew file mode 100644\nindex 000000000..5e7a97f24\n--- /dev/null\n+++ b/docs/source/package_reference/tracking.mdx\n@@ -0,0 +1,26 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Experiment Tracking\n+\n+## The Base Tracker Class\n+\n+[[autodoc]] tracking.GeneralTracker\n+\n+## Integrated Trackers\n+\n+[[autodoc]] tracking.TensorBoardTracker\n+ - __init__\n+[[autodoc]] tracking.WandBTracker\n+ - __init__\n+[[autodoc]] tracking.CometMLTracker\n+ - __init__\ndiff --git a/docs/source/utilities.mdx b/docs/source/package_reference/utilities.mdx\nsimilarity index 97%\nrename from docs/source/utilities.mdx\nrename to docs/source/package_reference/utilities.mdx\nindex bfcaeb60e..0c64953fc 100644\n--- a/docs/source/utilities.mdx\n+++ b/docs/source/package_reference/utilities.mdx\n@@ -44,8 +44,6 @@ These include data operations that mimic the same `torch` ops but can be used on\n \n These functionalities check the state of the current working environment including information about the operating system itself, what it can support, and if particular dependencies are installed. \n \n-[[autodoc]] utils.get_max_memory\n-\n [[autodoc]] utils.is_bf16_available\n \n [[autodoc]] utils.is_torch_version\n@@ -58,6 +56,12 @@ These functionalities check the state of the current working environment includi\n \n When setting up πŸ€— Accelerate for the first time, rather than running `accelerate config` [~utils.write_basic_config] can be used as an alternative for quick configuration.\n \n+## Memory\n+\n+[[autodoc]] utils.get_max_memory\n+\n+[[autodoc]] utils.find_executable_batch_size\n+\n ## Modeling\n \n These utilities relate to interacting with PyTorch models\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex 4b144ba05..812b20226 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -18,7 +18,7 @@ Let's have a look at the πŸ€— Accelerate main features and traps to avoid.\n \n To use πŸ€— Accelerate in your own script, you have to change four things:\n \n-1. Import the [`Accelerator`] main class instantiate one in an `accelerator` object:\n+1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object:\n \n ```python\n from accelerate import Accelerator\n@@ -28,7 +28,7 @@ accelerator = Accelerator()\n \n This should happen as early as possible in your training script as it will initialize everything necessary for\n distributed training. 
You don't need to indicate the kind of environment you are in (just one machine with a GPU, one\n-match with several GPUs, several machines with multiple GPUs or a TPU), the library will detect this automatically.\n+machine with several GPUs, several machines with multiple GPUs or a TPU), the library will detect this automatically.\n \n 2. Remove the call `.to(device)` or `.cuda()` for your model and input data. The `accelerator` object\n will handle this for you and place all those objects on the right device for you. If you know what you're doing, you\n@@ -40,8 +40,8 @@ To fully deactivate the automatic device placement, pass along `device_placement\n \n <Tip warning={true}>\n \n-If you place your objects manually on the proper device, be careful to create your optimizer after putting your\n-model on `accelerator.device` or your training will fail on TPU.\n+ If you place your objects manually on the proper device, be careful to create your optimizer after putting your\n+ model on `accelerator.device` or your training will fail on TPU.\n \n </Tip>\n \n@@ -61,9 +61,9 @@ use `shuffle=True` or any kind of random sampler).\n \n <Tip>\n \n-The actual batch size for your training will be the number of devices used multiplied by the batch size you set in\n-your script: for instance training on 4 GPUs with a batch size of 16 set when creating the training dataloader will\n-train at an actual batch size of 64.\n+ The actual batch size for your training will be the number of devices used multiplied by the batch size you set in\n+ your script: for instance training on 4 GPUs with a batch size of 16 set when creating the training dataloader will\n+ train at an actual batch size of 64.\n \n </Tip>\n \n@@ -76,16 +76,16 @@ training loop.\n \n <Tip warning={true}>\n \n-You should only pass the learning rate scheduler to [`~Accelerator.prepare`] when the scheduler needs to be stepped\n-at each optimizer step.\n+ You should only pass the learning rate scheduler to [`~Accelerator.prepare`] when the scheduler needs to be stepped\n+ at each optimizer step.\n \n </Tip>\n \n <Tip warning={true}>\n \n-Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its\n-length divided by X (since your actual batch size will be multiplied by X), unless you set\n-`split_batches=True`.\n+ Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its\n+ length divided by X (since your actual batch size will be multiplied by X), unless you set\n+ `split_batches=True`.\n \n </Tip>\n \n@@ -133,27 +133,32 @@ for inputs, targets in validation_dataloader:\n \n <Tip warning={true}>\n \n-As for the training dataloader, passing your validation dataloader through\n-[`~Accelerator.prepare`] may change it: if you run on X GPUs, it will have its length divided by X\n-(since your actual batch size will be multiplied by X), unless you set `split_batches=True`.\n+ Similar to the training dataloader, passing your validation dataloader through\n+ [`~Accelerator.prepare`] may change it: if you run on X GPUs, it will have its length divided by X\n+ (since your actual batch size will be multiplied by X), unless you set `split_batches=True`.\n+\n+</Tip>\n \n Any instruction using your training dataloader length (for instance if you need the number of total training steps\n-to create a learning rate scheduler) should go after the call to 
[`~Accelerator.prepare`]. \n \n-As some data at the end of the dataset may be duplicated so the batch can divide equally to all workers, metrics should be \n-calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data.\n+Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result, metrics\n+should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data while gathering.\n \n-If for some reason you don't wish to have this automatically done, [`~Accelerator.gather`] can be used instead to gather \n-the data across all processes and this can manually be done instead.\n+<Tip>\n+\n+ If for some reason you don't wish to have this automatically done, [`~Accelerator.gather`] can be used instead to gather \n+ the data across all processes and this can manually be done instead.\n \n </Tip>\n \n+\n <Tip warning={true}>\n \n-The [`~Accelerator.gather`] and [`~Accelerator.gather_for_metrics`] methods require the tensors to be all the same size on each process. If\n-you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in\n-a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad you tensor to the\n-biggest size across processes.\n+ The [`~Accelerator.gather`] and [`~Accelerator.gather_for_metrics`] methods require the tensors to be all the same size on each process. If\n+ you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in\n+ a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad you tensor to the\n+ biggest size across processes.\n \n </Tip>\n \n@@ -163,7 +168,7 @@ You can use the regular commands to launch your distributed training (like `torc\n PyTorch), they are fully compatible with πŸ€— Accelerate. The only caveat here is that πŸ€— Accelerate uses the environment\n to determine all useful information, so `torch.distributed.launch` should be used with the flag `--use_env`.\n \n-πŸ€— Accelerate also provides a CLI tool that unifies all launcher, so you only have to remember one command. To use it,\n+πŸ€— Accelerate also provides a CLI tool that unifies all launchers, so you only have to remember one command. To use it,\n just run:\n \n ```bash\n@@ -207,7 +212,10 @@ If you stored the config file in a non-default location, you can indicate it to\n accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script\n ```\n \n-You can also override any of the arguments determined by your config file, see TODO: insert ref here.\n+You can also override any of the arguments determined by your config file. \n+To see the complete list of parameters that you can pass in, run `accelerate launch -h`. \n+\n+Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts. \n \n \n ## Launching training from a notebook\n@@ -227,11 +235,14 @@ notebook_launcher(training_function)\n \n <Tip warning={true}>\n \n-Your `Accelerator` object should only be defined inside the training function. This is because the\n-initialization should be done inside the launcher only.\n+ Your [`Accelerator`] object should only be defined inside the training function. 
This is because the\n+ initialization should be done inside the launcher only.\n \n </Tip>\n \n+Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs. \n+\n+\n ## Training on TPU\n \n If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs\n@@ -242,7 +253,7 @@ optimizations takes some time.\n The good news is that this compilation will be cached so the second step and all the following will be much faster. The\n bad news is that it only applies if all of your steps do exactly the same operations, which implies:\n \n-- having all tensors of the same length in all your lengths\n+- having all tensors of the same length in all your batches\n - having static code (i.e., not a for loop of length that could change from step to step)\n \n Having any of the things above change between two steps will trigger a new compilation which will, once again, take a\n@@ -271,6 +282,8 @@ passed your model to [`~Accelerator.prepare`]) will break the tying. You will ne\n after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in\n the Transformers repository.\n \n+Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs. \n+\n \n ## Other caveats\n \n@@ -357,18 +370,21 @@ Note that since all the model parameters are references to tensors, this will lo\n ## Saving/loading entire states\n \n When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially LR schedulers to be restored in the _same script_.\n-You can use `accelerator.save_state` and `accelerator.load_state` respectively to do so, just by simply passing in a save location. \n-If you have registered any other stateful items to be stored through `accelerator.register_for_checkpointing` they will also be saved and/or loaded.\n+You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so, just by simply passing in a save location. \n+If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded.\n+\n <Tip>\n- Every object passed to `register_for_checkpointing` must have a `load_state_dict` and `save_dict` function to be stored\n+\n+ Every object passed to [`~Accelerator.register_for_checkpointing`] must have a `load_state_dict` and `state_dict` function to be stored\n+\n </Tip>\n \n \n ### Gradient clipping\n \n If you are using gradient clipping in your script, you should replace the calls to\n-`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with `accelerator.clip_grad_norm_`\n-and `accelerator.clip_grad_value_` respectively.\n+`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with [`~Accelerator.clip_grad_norm_`]\n+and [`~Accelerator.clip_grad_value_`] respectively.\n \n \n ### Mixed Precision training\n@@ -397,6 +413,26 @@ if not accelerator.optimizer_step_was_skipped:\n lr_scheduler.step()\n ```\n \n+### Gradient Accumulation \n+\n+To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`. 
\n+This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should\n+actually be performed, and auto-scale the loss:\n+\n+```python\n+accelerator = Accelerator(gradient_accumulation_steps=2)\n+model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)\n+\n+for input, label in training_dataloader:\n+ with accelerator.accumulate(model):\n+ predictions = model(input)\n+ loss = loss_function(predictions, label)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+ optimizer.zero_grad()\n+```\n+\n ### DeepSpeed\n \n DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight\n@@ -405,7 +441,7 @@ will be added in a next version.\n \n <Tip warning={true}>\n \n-The [`notebook_launcher`] does not support the DeepSpeed integration yet.\n+ The [`notebook_launcher`] does not support the DeepSpeed integration yet.\n \n </Tip>\n \n@@ -415,7 +451,7 @@ Internally, the library works by first analyzing the environment in which the sc\n kind of distributed setup is used, how many different processes there are and which one the current script is in. All\n that information is stored in the [`~AcceleratorState`].\n \n-This class is initialized the first time you instantiate a [`Accelerator`] as well as performing any\n+This class is initialized the first time you instantiate an [`~Accelerator`] as well as performing any\n specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of\n [`~state.AcceleratorState`].\n \n@@ -448,18 +484,18 @@ setting the same seed in the main random number generator in all processes.\n \n <Tip warning={true}>\n \n-Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random\n-artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get\n-the same random numbers from the torch random modules (so will apply the same random data augmentation if it's\n-controlled by torch).\n+ Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random\n+ artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get\n+ the same random numbers from the torch random modules (so will apply the same random data augmentation if it's\n+ controlled by torch).\n \n </Tip>\n \n <Tip>\n \n-The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local\n-`torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler`, as an example.\n+ The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local\n+ `torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler`, as an example.\n \n </Tip>\n \n-For more details about the internals, see the [Internals page](internal).\n+For more details about the internals, see the [Internals page](package_reference/torch_wrappers).\ndiff --git a/docs/source/big_modeling.mdx b/docs/source/usage_guides/big_modeling.mdx\nsimilarity index 87%\nrename from docs/source/big_modeling.mdx\nrename to docs/source/usage_guides/big_modeling.mdx\nindex 006c8bc4e..243b354d0 100644\n--- a/docs/source/big_modeling.mdx\n+++ b/docs/source/usage_guides/big_modeling.mdx\n@@ -31,7 +31,7 @@ While this works very well for regularly sized 
models, this workflow has some cl\n \n <Tip warning={true}>\n \n-This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future.\n+ This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future.\n \n </Tip>\n \n@@ -57,7 +57,7 @@ initializes an empty model with a bit more than 100B parameters. Behind the scen\n \n <Tip warning={true}>\n \n-You can't move a model initialized like this on CPU or another device directly, since it doesn't have any data. It's also very likely that a forward pass with that empty model will fail, as not all operations are supported on the meta device.\n+ You can't move a model initialized like this on CPU or another device directly, since it doesn't have any data. It's also very likely that a forward pass with that empty model will fail, as not all operations are supported on the meta device.\n \n </Tip>\n \n@@ -199,7 +199,7 @@ This way, you model can run for inference even if it doesn't fit on one of the G\n \n <Tip warning={true}>\n \n-This only supports inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending some GPU memory with intermediate activations.\n+ This only supports inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending some GPU memory with intermediate activations.\n \n </Tip>\n \n@@ -209,7 +209,7 @@ You can let πŸ€— Accelerate handle the device map computation by setting `device\n \n <Tip>\n \n-You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n+ You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.\n \n </Tip>\n \n@@ -222,7 +222,7 @@ When you have more GPU memory available than the model size, here the difference\n \n <Tip>\n \n-The options `\"auto\"` and `\"balanced\"` produce the same results for now, but the behavior of `\"auto\"` might change in the future if we find a strategy that makes more sense, while `\"balanced\"` will stay stable.\n+ The options `\"auto\"` and `\"balanced\"` produce the same results for now, but the behavior of `\"auto\"` might change in the future if we find a strategy that makes more sense, while `\"balanced\"` will stay stable.\n \n </Tip>\n \n@@ -238,9 +238,9 @@ device_map = infer_auto_device_map(my_model, max_memory={0: \"10GiB\", 1: \"10GiB\",\n \n <Tip warning={true}>\n \n-When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used do `torch.ones(1).cuda()` and look at the memory usage.\n+ When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. 
To see how much memory is actually used do `torch.ones(1).cuda()` and look at the memory usage.\n \n-Therefore when you create memory maps with `max_memory` make sure to adjust the avaialble memory accordingly to avoid out-of-memory errors.\n+ Therefore when you create memory maps with `max_memory` make sure to adjust the available memory accordingly to avoid out-of-memory errors.\n \n </Tip>\n \n@@ -271,7 +271,7 @@ device_map = {\"block1\": 0, \"block2.linear1\": 1, \"block2.linear2\": 1}\n \n <Tip>\n \n-To be the most efficient, make sure your device map puts the parameters on the GPUs in a sequential manner (e.g. don't put one of the first weights on GPU 0, then weights on GPU 1 and the last weight back to GPU 0) to avoid making many transfers of data between the GPUs.\n+ To be the most efficient, make sure your device map puts the parameters on the GPUs in a sequential manner (e.g. don't put one of the first weights on GPU 0, then weights on GPU 1 and the last weight back to GPU 0) to avoid making many transfers of data between the GPUs.\n \n </Tip>\n \n@@ -285,20 +285,4 @@ We are aware of the current limitations in the API:\n - [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.\n - The model parallelism used when your model is split on several GPUs is naive and not optimized, meaning that only one GPU works at a given time and the other sits idle.\n - When weights are offloaded on the CPU/hard drive, there is no pre-fetching (yet, we will work on this for future versions) which means the weights are put on the GPU when they are needed and not before.\n-- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).\n-\n-## API doc\n-\n-[[autodoc]] cpu_offload\n-\n-[[autodoc]] disk_offload\n-\n-[[autodoc]] dispatch_model\n-\n-[[autodoc]] infer_auto_device_map\n-\n-[[autodoc]] init_empty_weights\n-\n-[[autodoc]] load_checkpoint_and_dispatch\n-\n-[[autodoc]] load_checkpoint_in_model\n+- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).\n\\ No newline at end of file\ndiff --git a/docs/source/checkpoint.mdx b/docs/source/usage_guides/checkpoint.mdx\nsimilarity index 100%\nrename from docs/source/checkpoint.mdx\nrename to docs/source/usage_guides/checkpoint.mdx\ndiff --git a/docs/source/deepspeed.mdx b/docs/source/usage_guides/deepspeed.mdx\nsimilarity index 98%\nrename from docs/source/deepspeed.mdx\nrename to docs/source/usage_guides/deepspeed.mdx\nindex c0385c352..8a4238732 100644\n--- a/docs/source/deepspeed.mdx\n+++ b/docs/source/usage_guides/deepspeed.mdx\n@@ -472,25 +472,11 @@ model, eval_dataloader = accelerator.prepare(model, eval_dataloader)\n \n 1. Current integration doesn’t support Pipeline Parallelism of DeepSpeed.\n 2. Current integration doesn’t support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM. \n-3. Current integration doesn’t support multiple models for a given `accelerator` object. \n+3. Current integration doesn’t support multiple models. 
\n \n+## DeepSpeed Resources\n \n-## Internals\n-\n-[[autodoc]] utils.DeepSpeedPlugin\n-\n-[[autodoc]] utils.DummyOptim\n-\n-[[autodoc]] utils.DummyScheduler\n-\n-[[autodoc]] utils.DeepSpeedEngineWrapper\n-\n-[[autodoc]] utils.DeepSpeedOptimizerWrapper\n-\n-[[autodoc]] utils.DeepSpeedSchedulerWrapper\n-\n-\n-## Main DeepSpeed Resources\n+The documentation for the internals related to deepspeed can be found [here](../package_reference/deepspeed).\n \n - [Project's github](https://github.com/microsoft/deepspeed)\n - [Usage docs](https://www.deepspeed.ai/getting-started/)\ndiff --git a/docs/source/fsdp.mdx b/docs/source/usage_guides/fsdp.mdx\nsimilarity index 73%\nrename from docs/source/fsdp.mdx\nrename to docs/source/usage_guides/fsdp.mdx\nindex 32f982786..a561e4ecd 100644\n--- a/docs/source/fsdp.mdx\n+++ b/docs/source/usage_guides/fsdp.mdx\n@@ -39,10 +39,14 @@ For instance, here is how you would run the NLP example (from the root of the re\n compute_environment: LOCAL_MACHINE\n deepspeed_config: {}\n distributed_type: FSDP\n+downcast_bf16: 'no'\n fsdp_config:\n- min_num_params: 2000\n- offload_params: false\n- sharding_strategy: 1\n+ fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP\n+ fsdp_backward_prefetch_policy: BACKWARD_PRE\n+ fsdp_offload_params: false\n+ fsdp_sharding_strategy: 1\n+ fsdp_state_dict_type: FULL_STATE_DICT\n+ fsdp_transformer_layer_cls_to_wrap: GPT2Block\n machine_rank: 0\n main_process_ip: null\n main_process_port: null\n@@ -60,9 +64,13 @@ accelerate launch examples/nlp_example.py\n Currently, `Accelerate` supports the following config through the CLI:\n \n ```bash\n-`Sharding Strategy`: [1] FULL_SHARD, [2] SHARD_GRAD_OP\n-`Min Num Params`: FSDP\\'s minimum number of parameters for Default Auto Wrapping.\n-`Offload Params`: Decides Whether to offload parameters and gradients to CPU.\n+`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD\n+`Offload Params`: Decides Whether to offload parameters and gradients to CPU\n+`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP \n+`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, user specifies transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block`...\n+`Min Num Params`: minimum number of parameters when using `SIZE_BASED_WRAP`\n+`Backward Prefetch`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH\n+`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT \n ```\n \n ## A few caveats to be aware of\n@@ -76,19 +84,18 @@ Currently, `Accelerate` supports the following config through the CLI:\n However, below is the recommended way to prepare model and optimizer while using FSDP:\n \n ```diff\n-model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n + model = accelerator.prepare(model)\n \n-optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n+ optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n \n-- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(model,\n-- optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n - )\n \n + optimizer, 
train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n + optimizer, train_dataloader, eval_dataloader, lr_scheduler\n-+ )\n-\n++ )\n ```\n \n - In case of a single model, if you have created the optimizer with multiple parameter groups and called prepare with them together,\n@@ -110,11 +117,9 @@ optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n ```\n \n \n-- In case of multiple models, it is necessary to prepare the models before creating optimizers or else it will throw an error.\n-- Mixed precision is currently not supported with FSDP.\n+- In case of multiple models, it is necessary to prepare the models before creating optimizers or else it will throw an error. \n+Then pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.\n+- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of πŸ€— `Transformers` library.\n \n-For more control, users can leverage the `FullyShardedDataParallelPlugin` wherein they can specify `auto_wrap_policy`, `backward_prefetch` and `ignored_modules`.\n-After creating an instance of this class, users can pass it to the Accelerator class instantiation.\n+For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation.\n For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.\n-\n-[[autodoc]] utils.FullyShardedDataParallelPlugin\ndiff --git a/docs/source/gradient_accumulation.mdx b/docs/source/usage_guides/gradient_accumulation.mdx\nsimilarity index 86%\nrename from docs/source/gradient_accumulation.mdx\nrename to docs/source/usage_guides/gradient_accumulation.mdx\nindex f6ab857d8..f9f5dc62a 100644\n--- a/docs/source/gradient_accumulation.mdx\n+++ b/docs/source/usage_guides/gradient_accumulation.mdx\n@@ -71,13 +71,15 @@ First the code shown earlier will be converted to utilize πŸ€— Accelerate withou\n ```\n \n <Tip warning={true}>\n-In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization.\n+\n+ In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the [Concepts tutorial](concept_guides/gradient_synchronization)!\n+\n </Tip>\n \n ## Letting πŸ€— Accelerate handle gradient accumulation\n \n All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. 
To do so you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number \n-of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`Accelerator.backward`]:\n+of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`~Accelerator.backward`]:\n \n ```diff\n from accelerate import Accelerator\n@@ -85,19 +87,19 @@ of steps to perform before each call to `step()` and how to automatically adjust\n + accelerator = Accelerator(gradient_accumulation_steps=2)\n ```\n \n-From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n-You just wrap it around the entire training part of your code: \n+From here you can use the [`~Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n+You just wrap it around the entire training part of our code: \n \n ```diff\n - for index, batch in enumerate(training_dataloader):\n + for batch in training_dataloader:\n-+ with accelerator.accumulate(model):\n++ with accelerator.accumulate(model):\n optimizer.zero_grad()\n inputs, targets = batch\n outputs = model(inputs)\n ```\n \n-and you can remove all the special checks for the step number and the loss adjustment:\n+You can remove all the special checks for the step number and the loss adjustment:\n \n ```diff\n - loss = loss / gradient_accumulation_steps\ndiff --git a/docs/source/memory.mdx b/docs/source/usage_guides/memory.mdx\nsimilarity index 95%\nrename from docs/source/memory.mdx\nrename to docs/source/usage_guides/memory.mdx\nindex e578e43d7..9c5674977 100644\n--- a/docs/source/memory.mdx\n+++ b/docs/source/usage_guides/memory.mdx\n@@ -48,4 +48,4 @@ def training_function(args):\n + inner_training_loop()\n ```\n \n-[[autodoc]] utils.find_executable_batch_size\n\\ No newline at end of file\n+To find out more, check the documentation [here](package_reference/utilities#accelerate.find_executable_batch_size)\n\\ No newline at end of file\ndiff --git a/docs/source/sagemaker.mdx b/docs/source/usage_guides/sagemaker.mdx\nsimilarity index 90%\nrename from docs/source/sagemaker.mdx\nrename to docs/source/usage_guides/sagemaker.mdx\nindex 563f0c742..6195783e9 100644\n--- a/docs/source/sagemaker.mdx\n+++ b/docs/source/usage_guides/sagemaker.mdx\n@@ -54,7 +54,7 @@ accelerate config\n \n <Tip>\n \n-πŸ€— Accelerate is not saving any of your credentials.\n+ πŸ€— Accelerate is not saving any of your credentials.\n \n </Tip>\n \n@@ -72,8 +72,8 @@ directory. After training, artifacts in this directory are uploaded to S3:\n \n <Tip warning={true}>\n \n-SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to\n-specify type as bool in your script and provide an explicit True or False value for this hyperparameter. [[REF]](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script).\n+ SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to\n+ specify type as bool in your script and provide an explicit True or False value for this hyperparameter. 
[[REF]](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script).\n \n </Tip>\n \n@@ -92,7 +92,7 @@ arguments needed by your training script as named arguments.\n \n <Tip>\n \n-If you run one of the example scripts, don't forget to add `accelerator.save('/opt/ml/model')` to it.\n+ If you run one of the example scripts, don't forget to add `accelerator.save('/opt/ml/model')` to it.\n \n </Tip>\n \ndiff --git a/docs/source/tracking.mdx b/docs/source/usage_guides/tracking.mdx\nsimilarity index 85%\nrename from docs/source/tracking.mdx\nrename to docs/source/usage_guides/tracking.mdx\nindex 25e44b3c9..2fb68c0f2 100644\n--- a/docs/source/tracking.mdx\n+++ b/docs/source/usage_guides/tracking.mdx\n@@ -19,12 +19,9 @@ There are a large number of experiment tracking API's available, however getting\n \n Currently `Accelerate` supports three trackers out-of-the-box:\n \n-\n-[[autodoc]] tracking.TensorBoardTracker\n-\n-[[autodoc]] tracking.WandBTracker\n-\n-[[autodoc]] tracking.CometMLTracker\n+- TensorBoard\n+- WandB\n+- CometML\n \n To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerate`]:\n ```python\n@@ -107,6 +104,9 @@ import wandb\n \n \n class MyCustomTracker(GeneralTracker):\n+ name = \"wandb\"\n+ requires_logging_directory = False\n+\n def __init__(self, run_name: str):\n self.run_name = run_name\n wandb.init(self.run_name)\n@@ -137,27 +137,27 @@ accelerator = Accelerator(log_with=[tracker, \"all\"])\n \n If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:\n ```diff\n-from accelerate import Accelerator\n+ from accelerate import Accelerator\n + import neptune.new as neptune\n \n-accelerator = Accelerator()\n+ accelerator = Accelerator()\n + run = neptune.init(...)\n \n-my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n-device = accelerator.device\n-my_model.to(device)\n-\n-for iteration in config[\"num_iterations\"]:\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- total_loss += loss\n- accelerator.backward(loss)\n- my_optimizer.step()\n-+ if accelerator.is_main_process:\n-+ run[\"logs/training/batch/loss\"].log(loss)\n+ my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n+ device = accelerator.device\n+ my_model.to(device)\n+\n+ for iteration in config[\"num_iterations\"]:\n+ for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ total_loss += loss\n+ accelerator.backward(loss)\n+ my_optimizer.step()\n++ if accelerator.is_main_process:\n++ run[\"logs/training/batch/loss\"].log(loss)\n ```\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 977cd5d24..41fc1e3d4 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -140,10 +140,20 @@ class Accelerator:\n A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\n are created. 
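The `Accelerator` docstring being edited above ends on the `kwargs_handlers` argument, which takes a list of kwargs handlers used to customize how distributed and mixed-precision objects are created. As a brief sketch of how such a handler is typically passed (the `find_unused_parameters` flag is only an illustrative value):

```python
# Hedged sketch: customize the DistributedDataParallel wrapper via a kwargs handler.
from accelerate import Accelerator
from accelerate.utils import DistributedDataParallelKwargs

ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)  # illustrative flag
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
```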
See [kwargs](kwargs) for more information.\n \n- Attributes\n+ **Attributes:**\n \n - **device** (`torch.device`) -- The device to use.\n+ - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.\n+ - **local_process_index** (`int`) -- The process index on the current machine.\n+ - **mixed_precision** (`str`) -- The configured mixed precision mode.\n+ - **num_processes** (`int`) -- The total number of processes used for training.\n+ - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of\n+ gradient overflow in mixed precision), in which\n+ case the learning rate should not be changed.\n+ - **process_index** (`int`) -- The overall index of the current process among all processes.\n - **state** ([`~state.AcceleratorState`]) -- The distributed setup state.\n+ - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes.\n+ - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training.\n \"\"\"\n \n def __init__(\n@@ -308,6 +318,9 @@ def __init__(\n \n @property\n def use_distributed(self):\n+ \"\"\"\n+ Whether the Accelerator is configured for distributed training\n+ \"\"\"\n return self.distributed_type != DistributedType.NO and self.num_processes > 1\n \n @property\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 27a627ae9..a4f6bac2a 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -47,20 +47,18 @@ def parse_choice_from_env(key, default=\"no\"):\n # Inspired by Alex Martelli's 'Borg'.\n class AcceleratorState:\n \"\"\"\n- This is a variation of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all\n- instance of `AcceleratorState` share the same state, which is initialized on the first instantiation.\n+ Singleton class that has information about the current training environment.\n \n- Attributes:\n+ **Attributes:**\n \n - **device** (`torch.device`) -- The device to use.\n- - **sync_gradients** (`bool`) -- Whether to sync the gradients or not\n- - **distributed_type** (`~accelerate.state.DistributedType`) -- The type of distributed environment currently\n+ - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently\n in use.\n- - **num_processes** (`int`) -- The number of processes currently launched in parallel.\n- - **process_index** (`int`) -- The index of the current process.\n - **local_process_index** (`int`) -- The index of the current process on the current server.\n - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision. 
If you are using\n mixed precision, define if you want to use FP16 or BF16 (bfloat16) as the floating point.\n+ - **num_processes** (`int`) -- The number of processes currently launched in parallel.\n+ - **process_index** (`int`) -- The index of the current process.\n \"\"\"\n \n _shared_state = {}\n@@ -250,13 +248,10 @@ def _check_initialized(self, mixed_precision=None, cpu=None):\n \n class GradientState:\n \"\"\"\n- This is a variation of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all\n- instance of `GradientState` share the same state, which is initialized on the first instantiation.\n+ Singleton class that has information related to gradient synchronization for gradient accumulation\n \n- This specific state revolves around whether gradients should be synced and if we have reached the end of a prepared\n- dataloader Attributes:\n+ **Attributes:**\n \n- - **sync_gradients** (`bool`) -- Whether the gradients should be synced\n - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader\n - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader\n \"\"\"\n", "code_comments": [ { "body": "missing the use of the context manager below", "diff_hunk": "@@ -0,0 +1,109 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Performing gradient accumulation with πŸ€— Accelerate\n+\n+Gradient accumulation is a technique where you can train on bigger batch sizes than \n+your machine would normally be able to fit into memory. 
This is done by accumulating gradients over\n+several batches, and only performing the step through the optimizer after a certain number of batches have been performed.\n+\n+While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient\n+method for doing so and you may experience considerable slowdowns!\n+\n+In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilties provided in πŸ€— Accelerate,\n+which can total to adding just one line of code!\n+\n+This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n+\n+```python\n+device = 'cuda'\n+model.to(device)\n+\n+gradient_accumulation_steps = 2\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ loss.backward()\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Converting it to πŸ€— Accelerate\n+\n+First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n+\n+```python\n+from accelerate import Accelerator\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+<Tip warning={true}>\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the Concepts tutorial!\n+</Tip>\n+\n+## Letting πŸ€— Accelerate handle gradient accumulation\n+\n+All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. To do so you should pass in a `gradient_accumulation_steps` parameter to `Accelerator`, dictating the number \n+of steps to perform before each call to `step()` and `backward()`:\n+\n+```diff\n+from accelerate import Accelerator\n+-accelerator = Accelerator()\n++accelerator = Accelerator(gradient_accumulation_steps=2)\n+```\n+\n+From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!", "from_author": false }, { "body": "Thanks πŸ˜„ *Should* be set now, appreciate your thorough review on it! πŸ™‡ ", "diff_hunk": "@@ -0,0 +1,109 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Performing gradient accumulation with πŸ€— Accelerate\n+\n+Gradient accumulation is a technique where you can train on bigger batch sizes than \n+your machine would normally be able to fit into memory. This is done by accumulating gradients over\n+several batches, and only performing the step through the optimizer after a certain number of batches have been performed.\n+\n+While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient\n+method for doing so and you may experience considerable slowdowns!\n+\n+In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilties provided in πŸ€— Accelerate,\n+which can total to adding just one line of code!\n+\n+This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n+\n+```python\n+device = 'cuda'\n+model.to(device)\n+\n+gradient_accumulation_steps = 2\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ loss.backward()\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Converting it to πŸ€— Accelerate\n+\n+First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n+\n+```python\n+from accelerate import Accelerator\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+<Tip warning={true}>\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the Concepts tutorial!\n+</Tip>\n+\n+## Letting πŸ€— Accelerate handle gradient accumulation\n+\n+All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. To do so you should pass in a `gradient_accumulation_steps` parameter to `Accelerator`, dictating the number \n+of steps to perform before each call to `step()` and `backward()`:\n+\n+```diff\n+from accelerate import Accelerator\n+-accelerator = Accelerator()\n++accelerator = Accelerator(gradient_accumulation_steps=2)\n+```\n+\n+From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!", "from_author": true }, { "body": "looking forward to this feature!", "diff_hunk": "@@ -0,0 +1,109 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Performing gradient accumulation with πŸ€— Accelerate\n+\n+Gradient accumulation is a technique where you can train on bigger batch sizes than \n+your machine would normally be able to fit into memory. This is done by accumulating gradients over\n+several batches, and only performing the step through the optimizer after a certain number of batches have been performed.\n+\n+While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient\n+method for doing so and you may experience considerable slowdowns!\n+\n+In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilties provided in πŸ€— Accelerate,\n+which can total to adding just one line of code!\n+\n+This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n+\n+```python\n+device = 'cuda'\n+model.to(device)\n+\n+gradient_accumulation_steps = 2\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ loss.backward()\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Converting it to πŸ€— Accelerate\n+\n+First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n+\n+```python\n+from accelerate import Accelerator\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+<Tip warning={true}>\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the Concepts tutorial!\n+</Tip>\n+\n+## Letting πŸ€— Accelerate handle gradient accumulation\n+\n+All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. 
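The gradient-accumulation tutorial quoted in this review thread hands the bookkeeping over to `Accelerator(gradient_accumulation_steps=...)` and the `accumulate` context manager. A self-contained toy version of the finished loop might look like the following; the linear model, optimizer, and random data are invented stand-ins for the tutorial's `model` and `training_dataloader`.

```python
# Hedged sketch of the completed gradient-accumulation loop; model and data are made up.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(16, 4), torch.randn(16, 1)), batch_size=4)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()  # only actually updates every `gradient_accumulation_steps` batches
```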
To do so you should pass in a `gradient_accumulation_steps` parameter to `Accelerator`, dictating the number \n+of steps to perform before each call to `step()` and `backward()`:\n+\n+```diff\n+from accelerate import Accelerator\n+-accelerator = Accelerator()\n++accelerator = Accelerator(gradient_accumulation_steps=2)\n+```\n+\n+From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!", "from_author": false }, { "body": "```suggestion\r\nπŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.\r\n```", "diff_hunk": "@@ -12,121 +12,56 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n+πŸ€— Accelerate is a library that allows the same PyTorch code be ran across any distributed configuration by adding just four lines of code!", "from_author": false }, { "body": "```suggestion\r\nplatforms separately so you don't have to. In minutes, existing codebases can be converted to utilize [DeepSpeed](https://huggingface.co/docs/accelerate/deepspeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! \r\n```", "diff_hunk": "@@ -12,121 +12,56 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n+πŸ€— Accelerate is a library that allows the same PyTorch code be ran across any distributed configuration by adding just four lines of code!\n \n-## Features\n+Built on `torch_xla` and `torch.distributed`, Accelerate wraps around the tough and problematic code changes that need to occur on each of these\n+platforms separately so you don't have to. In minutes existing codebases can be converted to utilize [DeepSpeed](https://github.com/microsoft/DeepSpeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! ", "from_author": false }, { "body": "```suggestion\r\n<Tip> To get a better idea of this process, make sure to check out the [Tutorials](https://huggingface.co/docs/accelerate/basic_tutorials/overview) ! </Tip>\r\n```", "diff_hunk": "@@ -12,121 +12,56 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n+πŸ€— Accelerate is a library that allows the same PyTorch code be ran across any distributed configuration by adding just four lines of code!\n \n-## Features\n+Built on `torch_xla` and `torch.distributed`, Accelerate wraps around the tough and problematic code changes that need to occur on each of these\n+platforms separately so you don't have to. In minutes existing codebases can be converted to utilize [DeepSpeed](https://github.com/microsoft/DeepSpeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! \n \n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and in any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. 
The same code can then run\n- seamlessly on your local machine for debugging or your training environment.\n-\n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment and\n- then launch the scripts.\n-\n-\n-## Easy to integrate\n-\n-A traditional training loop in PyTorch looks like this:\n-\n-```python\n-my_model.to(device)\n-\n-for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- loss.backward()\n- my_optimizer.step()\n-```\n-\n-Changing it to work with accelerate is really easy and only adds a few lines of code:\n+For an idea of just how easy it can be, here's a brief example of the changes needed on a basic PyTorch script:\n+<Tip>To get a better idea of this process, make sure to check out the [Tutorials](https://huggingface.co/docs/accelerate/basic_tutorials/overview)!</Tip>", "from_author": false }, { "body": "I think for even greater impact, you can move the code snippet up here so users can immediately see the four lines of code and how easy it is to launch.", "diff_hunk": "@@ -12,121 +12,56 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.", "from_author": false }, { "body": "Maybe to avoid sounding too negative, this is a bit better?\r\n\r\n```suggestion\r\nBuilt on `torch_xla` and `torch.distributed`, πŸ€— Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms.\r\n```", "diff_hunk": "@@ -12,121 +12,56 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.\n \n-## Features\n+Built on `torch_xla` and `torch.distributed`, Accelerate wraps around the tough and problematic code changes that need to occur on each of these", "from_author": false }, { "body": "```suggestion\r\nplatforms separately so you don't have to. Convert existing codebases to utilize [DeepSpeed](https://huggingface.co/docs/accelerate/deepspeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! \r\n```", "diff_hunk": "@@ -12,121 +12,56 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.\n \n-## Features\n+Built on `torch_xla` and `torch.distributed`, Accelerate wraps around the tough and problematic code changes that need to occur on each of these\n+platforms separately so you don't have to. 
In minutes, existing codebases can be converted to utilize [DeepSpeed](https://huggingface.co/docs/accelerate/deepspeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! ", "from_author": false }, { "body": "I think the Markdown link isn't rendered because there needs to be a newline after `<Tip>`.\r\n\r\n```suggestion\r\n<Tip> \r\n\r\nTo get a better idea of this process, make sure to check out the [Tutorials](https://huggingface.co/docs/accelerate/basic_tutorials/overview)! \r\n\r\n</Tip>\r\n```", "diff_hunk": "@@ -12,121 +12,56 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.\n \n-## Features\n+Built on `torch_xla` and `torch.distributed`, Accelerate wraps around the tough and problematic code changes that need to occur on each of these\n+platforms separately so you don't have to. In minutes, existing codebases can be converted to utilize [DeepSpeed](https://huggingface.co/docs/accelerate/deepspeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! \n \n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and in any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run\n- seamlessly on your local machine for debugging or your training environment.\n-\n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment and\n- then launch the scripts.\n-\n-\n-## Easy to integrate\n-\n-A traditional training loop in PyTorch looks like this:\n-\n-```python\n-my_model.to(device)\n-\n-for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- loss.backward()\n- my_optimizer.step()\n-```\n-\n-Changing it to work with accelerate is really easy and only adds a few lines of code:\n+For an idea of just how easy it can be, here's a brief example of the changes needed on a basic PyTorch script:\n+<Tip> To get a better idea of this process, make sure to check out the [Tutorials](https://huggingface.co/docs/accelerate/basic_tutorials/overview) ! </Tip>", "from_author": false }, { "body": "The spacing is slightly off in this snippet for the lines removed and added.\r\n\r\n```suggestion\r\n- inputs = inputs.to(device)\r\n```", "diff_hunk": "@@ -12,121 +12,56 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.\n \n-## Features\n+Built on `torch_xla` and `torch.distributed`, Accelerate wraps around the tough and problematic code changes that need to occur on each of these\n+platforms separately so you don't have to. 
In minutes, existing codebases can be converted to utilize [DeepSpeed](https://huggingface.co/docs/accelerate/deepspeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! \n \n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and in any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run\n- seamlessly on your local machine for debugging or your training environment.\n-\n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment and\n- then launch the scripts.\n-\n-\n-## Easy to integrate\n-\n-A traditional training loop in PyTorch looks like this:\n-\n-```python\n-my_model.to(device)\n-\n-for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- loss.backward()\n- my_optimizer.step()\n-```\n-\n-Changing it to work with accelerate is really easy and only adds a few lines of code:\n+For an idea of just how easy it can be, here's a brief example of the changes needed on a basic PyTorch script:\n+<Tip> To get a better idea of this process, make sure to check out the [Tutorials](https://huggingface.co/docs/accelerate/basic_tutorials/overview) ! </Tip>\n \n ```diff\n-+ from accelerate import Accelerator\n-\n-+ accelerator = Accelerator()\n- # Use the device given by the *accelerator* object.\n-+ device = accelerator.device\n- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n-+ )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n-```\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n \n-and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n-\n-You can even simplify your script a bit by letting πŸ€— Accelerate handle the device placement for you (which is safer,\n-especially for TPU training):\n-\n-```diff\n-+ from accelerate import Accelerator\n++model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n++)\n \n-+ accelerator = Accelerator()\n-- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n-+ )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n-- inputs = inputs.to(device)\n-- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n-```\n-\n-## Script launcher\n-\n-No need to remember how to use `torch.distributed.launch` or to write a 
specific launcher for TPU training! πŸ€—\n-Accelerate comes with a CLI tool that will make your life easier when launching distributed scripts.\n-\n-On your machine(s) just run:\n-\n-```bash\n-accelerate config\n-```\n-\n-and answer the questions asked. This will generate a config file that will be used automatically to properly set the\n-default options when doing\n-\n-```bash\n-accelerate launch my_script.py --args_to_my_script\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)", "from_author": false }, { "body": "```suggestion\r\nNew features are added every day that haven't been released yet. To try them out yourself, install\r\n```", "diff_hunk": "@@ -0,0 +1,91 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Installation and Configuration\n+\n+Before you start, you will need to setup your environment, install the appropriate packages, and configure πŸ€— Accelerate. πŸ€— Accelerate is tested on **Python 3.7+**.\n+\n+## Installing πŸ€— Accelerate\n+\n+πŸ€— Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:\n+\n+### pip \n+\n+To install πŸ€— Accelerate from pypi, perform:\n+\n+```bash\n+pip install accelerate\n+```\n+\n+### conda\n+\n+πŸ€— Accelerate can also be installed with conda with:\n+\n+```bash\n+conda install -c conda-forge accelerate\n+```\n+\n+### Source\n+\n+As πŸ€— Accelerate is rapidly under development, new features are added every day that haven't been released yet. To try them out yourself install", "from_author": false }, { "body": "```suggestion\r\nfrom the GitHub repository:\r\n```", "diff_hunk": "@@ -0,0 +1,91 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Installation and Configuration\n+\n+Before you start, you will need to setup your environment, install the appropriate packages, and configure πŸ€— Accelerate. πŸ€— Accelerate is tested on **Python 3.7+**.\n+\n+## Installing πŸ€— Accelerate\n+\n+πŸ€— Accelerate is available on pypi and conda, as well as on GitHub. 
Details to install from each are below:\n+\n+### pip \n+\n+To install πŸ€— Accelerate from pypi, perform:\n+\n+```bash\n+pip install accelerate\n+```\n+\n+### conda\n+\n+πŸ€— Accelerate can also be installed with conda with:\n+\n+```bash\n+conda install -c conda-forge accelerate\n+```\n+\n+### Source\n+\n+As πŸ€— Accelerate is rapidly under development, new features are added every day that haven't been released yet. To try them out yourself install\n+from the Github Repository:", "from_author": false }, { "body": "```suggestion\r\nAfter installing, you need to configure πŸ€— Accelerate for how the current system is setup for training. \r\n```", "diff_hunk": "@@ -0,0 +1,91 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Installation and Configuration\n+\n+Before you start, you will need to setup your environment, install the appropriate packages, and configure πŸ€— Accelerate. πŸ€— Accelerate is tested on **Python 3.7+**.\n+\n+## Installing πŸ€— Accelerate\n+\n+πŸ€— Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:\n+\n+### pip \n+\n+To install πŸ€— Accelerate from pypi, perform:\n+\n+```bash\n+pip install accelerate\n+```\n+\n+### conda\n+\n+πŸ€— Accelerate can also be installed with conda with:\n+\n+```bash\n+conda install -c conda-forge accelerate\n+```\n+\n+### Source\n+\n+As πŸ€— Accelerate is rapidly under development, new features are added every day that haven't been released yet. To try them out yourself install\n+from the Github Repository:\n+\n+```bash\n+git clone https://github.com/huggingface/accelerate\n+cd accelerate\n+pip install -e .\n+```\n+\n+## Configuring πŸ€— Accelerate\n+\n+After installing, you need to tell Accelerate how the current system is setup for training. ", "from_author": false }, { "body": "```suggestion\r\nThis tutorial will detail how to easily convert existing PyTorch code to use πŸ€— Accelerate!\r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!", "from_author": false }, { "body": "```suggestion\r\nYou'll see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on ", "from_author": false }, { "body": "```suggestion\r\nTo begin, write out a very basic PyTorch training loop. \r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, first write out a very basic PyTorch training loop. ", "from_author": false }, { "body": "```suggestion\r\nWe are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.\r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, first write out a very basic PyTorch training loop. 
\n+\n+<Tip>\n+We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand", "from_author": false }, { "body": "```suggestion\r\n[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!\r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, first write out a very basic PyTorch training loop. \n+\n+<Tip>\n+We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand\n+</Tip>\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss.backward()\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Add in πŸ€— Accelerate\n+\n+To start using πŸ€— Accelerate, first import and create an [`Accelerator`] instance:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+This class will be the main force helping you with utilizing everything possible for distributed training!", "from_author": false }, { "body": "```suggestion\r\nmake sure everything is setup in the current environment for you to start training:\r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, first write out a very basic PyTorch training loop. 
\n+\n+<Tip>\n+We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand\n+</Tip>\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss.backward()\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Add in πŸ€— Accelerate\n+\n+To start using πŸ€— Accelerate, first import and create an [`Accelerator`] instance:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+This class will be the main force helping you with utilizing everything possible for distributed training!\n+\n+### Setting the right device\n+\n+The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should\n+change the definition of `device` to come from [`Accelerator`]:\n+\n+```diff\n+-device = 'cuda'\n++device = accelerator.device\n+model.to(device)\n+```\n+\n+### Preparing your objects\n+\n+Next you need to pass all of the important objects related to training into [`Accelerator.prepare`]. πŸ€— Accelerate will\n+then make sure that everything is setup for the environment you are currently in to start your training:", "from_author": false }, { "body": "```suggestion\r\nThese objects are returned in the same order they were sent in with. By default, `device_placement=True`, so your model is automatically moved to the proper device.\r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, first write out a very basic PyTorch training loop. 
\n+\n+<Tip>\n+We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand\n+</Tip>\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss.backward()\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Add in πŸ€— Accelerate\n+\n+To start using πŸ€— Accelerate, first import and create an [`Accelerator`] instance:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+This class will be the main force helping you with utilizing everything possible for distributed training!\n+\n+### Setting the right device\n+\n+The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should\n+change the definition of `device` to come from [`Accelerator`]:\n+\n+```diff\n+-device = 'cuda'\n++device = accelerator.device\n+model.to(device)\n+```\n+\n+### Preparing your objects\n+\n+Next you need to pass all of the important objects related to training into [`Accelerator.prepare`]. πŸ€— Accelerate will\n+then make sure that everything is setup for the environment you are currently in to start your training:\n+\n+```\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+```\n+These objects are returned in the same order they were sent in with. If `device_placement=True` is passed into the [`Accelerator`] object (the default), ", "from_author": false }, { "body": "I think you also need to add a newline before and after the `<Tip>` here so the code format is rendered.\r\n\r\n```suggestion\r\nAccelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`).\r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, first write out a very basic PyTorch training loop. 
\n+\n+<Tip>\n+We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand\n+</Tip>\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss.backward()\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Add in πŸ€— Accelerate\n+\n+To start using πŸ€— Accelerate, first import and create an [`Accelerator`] instance:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+This class will be the main force helping you with utilizing everything possible for distributed training!\n+\n+### Setting the right device\n+\n+The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should\n+change the definition of `device` to come from [`Accelerator`]:\n+\n+```diff\n+-device = 'cuda'\n++device = accelerator.device\n+model.to(device)\n+```\n+\n+### Preparing your objects\n+\n+Next you need to pass all of the important objects related to training into [`Accelerator.prepare`]. πŸ€— Accelerate will\n+then make sure that everything is setup for the environment you are currently in to start your training:\n+\n+```\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+```\n+These objects are returned in the same order they were sent in with. If `device_placement=True` is passed into the [`Accelerator`] object (the default), \n+performing this action will also automatically move your model to the proper device as well.\n+\n+<Tip warning={true}>\n+Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`)", "from_author": false }, { "body": "```suggestion\r\nFinally, three lines of code need to be changed in the training loop. πŸ€— Accelerate's DataLoader classes will automatically handle the device placement by default,\r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, first write out a very basic PyTorch training loop. 
\n+\n+<Tip>\n+We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand\n+</Tip>\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss.backward()\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Add in πŸ€— Accelerate\n+\n+To start using πŸ€— Accelerate, first import and create an [`Accelerator`] instance:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+This class will be the main force helping you with utilizing everything possible for distributed training!\n+\n+### Setting the right device\n+\n+The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should\n+change the definition of `device` to come from [`Accelerator`]:\n+\n+```diff\n+-device = 'cuda'\n++device = accelerator.device\n+model.to(device)\n+```\n+\n+### Preparing your objects\n+\n+Next you need to pass all of the important objects related to training into [`Accelerator.prepare`]. πŸ€— Accelerate will\n+then make sure that everything is setup for the environment you are currently in to start your training:\n+\n+```\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+```\n+These objects are returned in the same order they were sent in with. If `device_placement=True` is passed into the [`Accelerator`] object (the default), \n+performing this action will also automatically move your model to the proper device as well.\n+\n+<Tip warning={true}>\n+Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`)\n+</Tip>\n+\n+### Modifying the training loop\n+\n+Finally, three lines of code need changing in our training loop. πŸ€— Accelerate's DataLoader classes will automatically handle the device placement by default,", "from_author": false }, { "body": "Love that you added a complete version of all the snippets so users have the option of just copying the code once!", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code into utilizing πŸ€— Accelerate!\n+You'll be able to see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, first write out a very basic PyTorch training loop. 
\n+\n+<Tip>\n+We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand\n+</Tip>\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss.backward()\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Add in πŸ€— Accelerate\n+\n+To start using πŸ€— Accelerate, first import and create an [`Accelerator`] instance:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+This class will be the main force helping you with utilizing everything possible for distributed training!\n+\n+### Setting the right device\n+\n+The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should\n+change the definition of `device` to come from [`Accelerator`]:\n+\n+```diff\n+-device = 'cuda'\n++device = accelerator.device\n+model.to(device)\n+```\n+\n+### Preparing your objects\n+\n+Next you need to pass all of the important objects related to training into [`Accelerator.prepare`]. πŸ€— Accelerate will\n+then make sure that everything is setup for the environment you are currently in to start your training:\n+\n+```\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+```\n+These objects are returned in the same order they were sent in with. If `device_placement=True` is passed into the [`Accelerator`] object (the default), \n+performing this action will also automatically move your model to the proper device as well.\n+\n+<Tip warning={true}>\n+Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`)\n+</Tip>\n+\n+### Modifying the training loop\n+\n+Finally, three lines of code need changing in our training loop. πŸ€— Accelerate's DataLoader classes will automatically handle the device placement by default,\n+and [`Accelerator.backward`] should be used for performing the backward pass:\n+\n+```diff\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+- loss.backward()\n++ accelerator.backward(loss)\n+```\n+\n+With that, your training loop is now ready to use πŸ€— Accelerate!\n+\n+## The finished code", "from_author": false }, { "body": "```suggestion\r\nIn the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\r\n```", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.", "from_author": false }, { "body": "The right navbar doesn't display code format so the ticks will just show up :/\r\n\r\n```suggestion\r\n## Using accelerate launch\r\n```", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`", "from_author": false }, { "body": "Need some newlines here too :)", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>", "from_author": false }, { "body": "```suggestion\r\nSince this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.\r\n```", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various different torch spawn methods, all of the environmental variables that would be expected for them can be modified here as well.", "from_author": false }, { "body": "```suggestion\r\nFor example, here is how to use `accelerate launch` with a single GPU:\r\n```", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various different torch spawn methods, all of the environmental variables that would be expected for them can be modified here as well.\n+For example, here is how to use `accelerate launch` but only use a single GPU:", "from_author": false }, { "body": "```suggestion\r\nIn this case, πŸ€— Accelerate will make some hyperparameter decisions for you, such as if a GPU is available, it will use all of them by default, and no mixed precision will be used. \r\n```", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various different torch spawn methods, all of the environmental variables that would be expected for them can be modified here as well.\n+For example, here is how to use `accelerate launch` but only use a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+Accelerate can take some liberties towards a few hyperparameters, such as if a GPU is available it will use all of them by default, and no mixed precision will be used. ", "from_author": false }, { "body": "I would maybe clarify that you'll get a warning if you don't pass in the needed parameters. This is kind of unclear until now.\r\n\r\n```suggestion\r\nwould also launch that same script on two GPUs using mixed precision while avoiding all of the warnings: \r\n```", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various different torch spawn methods, all of the environmental variables that would be expected for them can be modified here as well.\n+For example, here is how to use `accelerate launch` but only use a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+Accelerate can take some liberties towards a few hyperparameters, such as if a GPU is available it will use all of them by default, and no mixed precision will be used. \n+Here is how you would use all GPUs and train on no mixed precision:\n+```bash\n+accelerate launch --multi_gpu {script_name.py} --arg1 --arg2 ...\n+```\n+To get more specific you should pass in the needed parameters yourself. 
For instance, here is how you \n+would also launch that same script on two GPUs using mixed precision, but also avoiding all of the warnings: ", "from_author": false }, { "body": "```suggestion\r\nFor a complete list of parameters you can pass in, run:\r\n```", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. 
It is not required to use `accelerate launch`\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various different torch spawn methods, all of the environmental variables that would be expected for them can be modified here as well.\n+For example, here is how to use `accelerate launch` but only use a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+Accelerate can take some liberties towards a few hyperparameters, such as if a GPU is available it will use all of them by default, and no mixed precision will be used. \n+Here is how you would use all GPUs and train on no mixed precision:\n+```bash\n+accelerate launch --multi_gpu {script_name.py} --arg1 --arg2 ...\n+```\n+To get more specific you should pass in the needed parameters yourself. For instance, here is how you \n+would also launch that same script on two GPUs using mixed precision, but also avoiding all of the warnings: \n+```bash\n+accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} --arg1 --arg2 ...\n+```\n+\n+For a full list of all parameters you can pass in, run:", "from_author": false }, { "body": "```suggestion\r\n- If this does not exist either, the folder `~/.cache/huggingface/accelerate`.\r\n```", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. 
For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various different torch spawn methods, all of the environmental variables that would be expected for them can be modified here as well.\n+For example, here is how to use `accelerate launch` but only use a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+Accelerate can take some liberties towards a few hyperparameters, such as if a GPU is available it will use all of them by default, and no mixed precision will be used. \n+Here is how you would use all GPUs and train on no mixed precision:\n+```bash\n+accelerate launch --multi_gpu {script_name.py} --arg1 --arg2 ...\n+```\n+To get more specific you should pass in the needed parameters yourself. For instance, here is how you \n+would also launch that same script on two GPUs using mixed precision, but also avoiding all of the warnings: \n+```bash\n+accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} --arg1 --arg2 ...\n+```\n+\n+For a full list of all parameters you can pass in, run:\n+```bash\n+accelerate launch -h\n+```\n+\n+<Tip>\n+Even if you are not using πŸ€— Accelerate in your code, you can still use the launcher for starting your scripts!\n+</Tip>\n+\n+## Custom Configurations\n+\n+As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations \n+made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for πŸ€— Accelerate. 
\n+This cache folder is located at (with decreasing order of priority):\n+\n+- The content of your environment variable `HF_HOME` suffixed with `accelerate`.\n+- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with\n+ `huggingface/accelerate`.\n+- If this does not exist either, the folder `~/.cache/huggingface/accelerate`", "from_author": false }, { "body": "I would also move the snippet at the end of this page under here since that's what it's showing here right?", "diff_hunk": "@@ -0,0 +1,144 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial you were introduced with how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using `accelerate launch`\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. 
It is not required to use `accelerate launch`\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various different torch spawn methods, all of the environmental variables that would be expected for them can be modified here as well.\n+For example, here is how to use `accelerate launch` but only use a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+Accelerate can take some liberties towards a few hyperparameters, such as if a GPU is available it will use all of them by default, and no mixed precision will be used. \n+Here is how you would use all GPUs and train on no mixed precision:\n+```bash\n+accelerate launch --multi_gpu {script_name.py} --arg1 --arg2 ...\n+```\n+To get more specific you should pass in the needed parameters yourself. For instance, here is how you \n+would also launch that same script on two GPUs using mixed precision, but also avoiding all of the warnings: \n+```bash\n+accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} --arg1 --arg2 ...\n+```\n+\n+For a full list of all parameters you can pass in, run:\n+```bash\n+accelerate launch -h\n+```\n+\n+<Tip>\n+Even if you are not using πŸ€— Accelerate in your code, you can still use the launcher for starting your scripts!\n+</Tip>\n+\n+## Custom Configurations\n+\n+As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations \n+made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for πŸ€— Accelerate. \n+This cache folder is located at (with decreasing order of priority):\n+\n+- The content of your environment variable `HF_HOME` suffixed with `accelerate`.\n+- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with\n+ `huggingface/accelerate`.\n+- If this does not exist either, the folder `~/.cache/huggingface/accelerate`\n+\n+To have multiple configurations, the flag `--config_file` can be passed to the `accelerate launch` command paired ", "from_author": false }, { "body": "I think you can create a separate Troubleshoot page for all these caveats. I think a user just getting started doesn't necessarily need to know all these smaller details. Likewise, I think the Internal mechanism section is probably better in the Concepts and Fundamentals section since the user doesn't need to know exactly how it works under the hood. ", "diff_hunk": "@@ -271,6 +276,8 @@ passed your model to [`~Accelerator.prepare`]) will break the tying. You will ne\n after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in\n the Transformers repository.\n \n+Check out the [TPU tutorial](https://huggingface.co/docs/accelerate/concept_guides/training_tpu) for more information about training on TPUs. 
\n+\n \n ## Other caveats", "from_author": false }, { "body": "Adding a link to it would make it easier for users to navigate there.", "diff_hunk": "@@ -49,29 +49,29 @@ for index, batch in enumerate(training_dataloader):\n First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n \n ```diff\n-+ from accelerate import Accelerator\n-+ accelerator = Accelerator()\n-\n-+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n-+ model, optimizer, training_dataloader, scheduler\n-+ )\n-\n- for index, batch in enumerate(training_dataloader):\n- optimizer.zero_grad()\n- inputs, targets = batch\n-- inputs = inputs.to(device)\n-- targets = targets.to(device)\n- outputs = model(inputs)\n- loss = loss_function(outputs, targets)\n- loss = loss / gradient_accumulation_steps\n-+ accelerator.backward(loss)\n- if (index+1) % gradient_accumulation_steps == 0:\n- optimizer.step()\n- scheduler.step()\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+\n++model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n++)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n++ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n ```\n \n <Tip warning={true}>\n-In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization.\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. 
Read more about that in the Concepts tutorial!", "from_author": false }, { "body": "```suggestion\r\nYou can remove all the special checks for the step number and the loss adjustment:\r\n```", "diff_hunk": "@@ -80,35 +80,39 @@ All that is left now is to let πŸ€— Accelerate handle the gradient accumulation\n of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`Accelerator.backward`]:\n \n ```diff\n- from accelerate import Accelerator\n-- accelerator = Accelerator()\n-+ accelerator = Accelerator(gradient_accumulation_steps=2)\n+from accelerate import Accelerator\n+-accelerator = Accelerator()\n++accelerator = Accelerator(gradient_accumulation_steps=2)\n ```\n \n From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n-You just wrap it around the entire training part of your code: \n+You just wrap it around the entire training part of our code: \n \n ```diff\n-- for index, batch in enumerate(training_dataloader):\n-+ for batch in training_dataloader:\n-+ with accelerator.accumulate(model):\n- optimizer.zero_grad()\n- inputs, targets = batch\n- outputs = model(inputs)\n+-for index, batch in enumerate(training_dataloader):\n++for batch in training_dataloader:\n++ with accelerator.accumulate(model):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n ```\n \n and you can remove all the special checks for the step number and the loss adjustment:", "from_author": false }, { "body": "Add some newlines so `DataLoaders` is properly rendered, and I'd also add a link to the Gradient Sync concept guide.", "diff_hunk": "@@ -80,35 +80,39 @@ All that is left now is to let πŸ€— Accelerate handle the gradient accumulation\n of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`Accelerator.backward`]:\n \n ```diff\n- from accelerate import Accelerator\n-- accelerator = Accelerator()\n-+ accelerator = Accelerator(gradient_accumulation_steps=2)\n+from accelerate import Accelerator\n+-accelerator = Accelerator()\n++accelerator = Accelerator(gradient_accumulation_steps=2)\n ```\n \n From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n-You just wrap it around the entire training part of your code: \n+You just wrap it around the entire training part of our code: \n \n ```diff\n-- for index, batch in enumerate(training_dataloader):\n-+ for batch in training_dataloader:\n-+ with accelerator.accumulate(model):\n- optimizer.zero_grad()\n- inputs, targets = batch\n- outputs = model(inputs)\n+-for index, batch in enumerate(training_dataloader):\n++for batch in training_dataloader:\n++ with accelerator.accumulate(model):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n ```\n \n and you can remove all the special checks for the step number and the loss adjustment:\n \n ```diff\n-- loss = loss / gradient_accumulation_steps\n- accelerator.backward(loss)\n-- if (index+1) % gradient_accumulation_steps == 0:\n- optimizer.step()\n- scheduler.step()\n+-loss = loss / gradient_accumulation_steps\n+accelerator.backward(loss)\n+-if (index+1) % gradient_accumulation_steps == 0:\n+optimizer.step()\n+scheduler.step()\n ```\n \n As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether 
to step through the prepared optimizer and how to adjust the loss. \n \n+<Tip warning={true}>", "from_author": false }, { "body": "I think we can probably remove this header.", "diff_hunk": "@@ -0,0 +1,111 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+## Background into PyTorch distributed", "from_author": false }, { "body": "```suggestion\r\ntraining in a distributed setup. But how does this risk slowing down your code?\r\n```", "diff_hunk": "@@ -0,0 +1,111 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+## Background into PyTorch distributed\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. \n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+import torch.nn as nn\n+-from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10,10)\n++model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. But how does this automatically cause a risk of slowing down your code?", "from_author": false }, { "body": "```suggestion\r\nThe most direct example is when you update all of the parameters in a model through `.backward()`. All instances of the model\r\n```", "diff_hunk": "@@ -0,0 +1,111 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+## Background into PyTorch distributed\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. \n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+import torch.nn as nn\n+-from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10,10)\n++model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. But how does this automatically cause a risk of slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`, all instances of the model", "from_author": false }, { "body": "This was a little hard to follow, is this what you meant?\r\n\r\n```suggestion\r\nneed to have updated their gradients, collated, and updated again before moving onto the next batch of data. But when performing \r\n```", "diff_hunk": "@@ -0,0 +1,111 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+## Background into PyTorch distributed\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. 
\n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+import torch.nn as nn\n+-from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10,10)\n++model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. But how does this automatically cause a risk of slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`, all instances of the model\n+need to have updated their gradients collated and updated before moving onto the next batch of data. But when performing ", "from_author": false }, { "body": "```suggestion\r\ngradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This \r\n```", "diff_hunk": "@@ -0,0 +1,111 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+## Background into PyTorch distributed\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. \n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+import torch.nn as nn\n+-from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10,10)\n++model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. 
But how does this automatically cause a risk of slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`, all instances of the model\n+need to have updated their gradients collated and updated before moving onto the next batch of data. But when performing \n+gradient accumulation, you accumulate `n` losses and skipping `.backward()` until `n` batches has been reached. This ", "from_author": false }, { "body": "Is `them` referring to the batches of data?\r\n\r\n```suggestion\r\ncan cause a significant slowdown since all the processes need to communicate with them more times than needed. How \r\n```", "diff_hunk": "@@ -0,0 +1,111 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+## Background into PyTorch distributed\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. \n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+import torch.nn as nn\n+-from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10,10)\n++model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. But how does this automatically cause a risk of slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`, all instances of the model\n+need to have updated their gradients collated and updated before moving onto the next batch of data. But when performing \n+gradient accumulation, you accumulate `n` losses and skipping `.backward()` until `n` batches has been reached. 
This \n+can cause a significant slowdown since all the processes need to communicate with them more times than needed. But how ", "from_author": false }, { "body": "Add newlines here as well :)", "diff_hunk": "@@ -0,0 +1,27 @@\n+# Deferring Executions\n+\n+When you run your usual script, instructions are executed in order. Using πŸ€— Accelerate to deploy your script on several\n+GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be\n+faster than others.\n+\n+You might need to wait for all processes to have reached a certain point before executing a given instruction. For\n+instance, you shouldn't save a model before being sure every process is done with training, and you wouldn't want to \n+continue training before all the model weights have been loaded in. To do this, just write the following line in your code:\n+\n+```\n+accelerator.wait_for_everyone()\n+```\n+\n+This instruction will block all the processes that arrive first until all the other processes have reached that\n+point (if you run your script on just one GPU or CPU, this won't do anything).\n+\n+A few example cases for when to use this utility are listed below:\n+\n+<Tip>", "from_author": false }, { "body": "Add newlines here too", "diff_hunk": "@@ -0,0 +1,146 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Training on TPUs with πŸ€— Accelerate\n+\n+Training on TPUs can be slightly different than training on multi-gpu, even with πŸ€— Accelerate. This guide aims to show you \n+where you should be careful and why, as well as the best practices in general.\n+\n+## Training in a Notebook\n+\n+The main carepoint when training on TPUs comes from the `notebook_launcher`. As mentioned in the [notebook tutorial](https://huggingface.co/docs/accelerate/basic_tutorials/notebook), you need to \n+restructure your training code into a function that can get passed to the `notebook_launcher` function and be careful about not declaring any tensors on the GPU.\n+\n+While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**. \n+When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already \n+utilizing a python process, you need to *fork* a new process from it to launch your code. \n+\n+Where this becomes important is in regards to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your \n+training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead one \n+model instance is shared between all the nodes and it is passed back and forth. 
This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or\n+on Google Colaboratory. \n+\n+Below is an example of a training function passed to the `notebook_launcher` if training on CPUs or GPUs:\n+\n+<Tip>", "from_author": false }, { "body": "At the time the concepts tutorial didn't exist πŸ˜ƒ will definitely add it in!", "diff_hunk": "@@ -49,29 +49,29 @@ for index, batch in enumerate(training_dataloader):\n First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper:\n \n ```diff\n-+ from accelerate import Accelerator\n-+ accelerator = Accelerator()\n-\n-+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n-+ model, optimizer, training_dataloader, scheduler\n-+ )\n-\n- for index, batch in enumerate(training_dataloader):\n- optimizer.zero_grad()\n- inputs, targets = batch\n-- inputs = inputs.to(device)\n-- targets = targets.to(device)\n- outputs = model(inputs)\n- loss = loss_function(outputs, targets)\n- loss = loss / gradient_accumulation_steps\n-+ accelerator.backward(loss)\n- if (index+1) % gradient_accumulation_steps == 0:\n- optimizer.step()\n- scheduler.step()\n++from accelerate import Accelerator\n++accelerator = Accelerator()\n+\n++model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n++)\n+\n+for index, batch in enumerate(training_dataloader):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss = loss / gradient_accumulation_steps\n++ accelerator.backward(loss)\n+ if (index+1) % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ scheduler.step()\n ```\n \n <Tip warning={true}>\n-In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization.\n+In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the Concepts tutorial!", "from_author": true }, { "body": "This actually is a mistake because this warning is no longer needed! 
But I added a link to the gradient sync tutorial int he earlier mention of it", "diff_hunk": "@@ -80,35 +80,39 @@ All that is left now is to let πŸ€— Accelerate handle the gradient accumulation\n of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`Accelerator.backward`]:\n \n ```diff\n- from accelerate import Accelerator\n-- accelerator = Accelerator()\n-+ accelerator = Accelerator(gradient_accumulation_steps=2)\n+from accelerate import Accelerator\n+-accelerator = Accelerator()\n++accelerator = Accelerator(gradient_accumulation_steps=2)\n ```\n \n From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n-You just wrap it around the entire training part of your code: \n+You just wrap it around the entire training part of our code: \n \n ```diff\n-- for index, batch in enumerate(training_dataloader):\n-+ for batch in training_dataloader:\n-+ with accelerator.accumulate(model):\n- optimizer.zero_grad()\n- inputs, targets = batch\n- outputs = model(inputs)\n+-for index, batch in enumerate(training_dataloader):\n++for batch in training_dataloader:\n++ with accelerator.accumulate(model):\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n ```\n \n and you can remove all the special checks for the step number and the loss adjustment:\n \n ```diff\n-- loss = loss / gradient_accumulation_steps\n- accelerator.backward(loss)\n-- if (index+1) % gradient_accumulation_steps == 0:\n- optimizer.step()\n- scheduler.step()\n+-loss = loss / gradient_accumulation_steps\n+accelerator.backward(loss)\n+-if (index+1) % gradient_accumulation_steps == 0:\n+optimizer.step()\n+scheduler.step()\n ```\n \n As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss. \n \n+<Tip warning={true}>", "from_author": true }, { "body": "```suggestion\r\nConvert existing codebases to utilize [DeepSpeed](https://huggingface.co/docs/accelerate/deepspeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! \r\n```", "diff_hunk": "@@ -12,121 +12,60 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n-\n-## Features\n-\n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and in any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. 
The same code can then run\n- seamlessly on your local machine for debugging or your training environment.\n-\n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment and\n- then launch the scripts.\n-\n-\n-## Easy to integrate\n-\n-A traditional training loop in PyTorch looks like this:\n-\n-```python\n-my_model.to(device)\n-\n-for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- loss.backward()\n- my_optimizer.step()\n-```\n-\n-Changing it to work with accelerate is really easy and only adds a few lines of code:\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.\n \n ```diff\n + from accelerate import Accelerator\n-\n + accelerator = Accelerator()\n- # Use the device given by the *accelerator* object.\n-+ device = accelerator.device\n- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n-+ )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n-```\n \n-and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n-\n-You can even simplify your script a bit by letting πŸ€— Accelerate handle the device placement for you (which is safer,\n-especially for TPU training):\n-\n-```diff\n-+ from accelerate import Accelerator\n-\n-+ accelerator = Accelerator()\n-- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n++ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n + )\n \n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n-- inputs = inputs.to(device)\n-- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n++ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n ```\n \n-## Script launcher\n+Built on `torch_xla` and `torch.distributed`, πŸ€— Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms.\n+platforms separately so you don't have to. 
Convert existing codebases to utilize [DeepSpeed](https://huggingface.co/docs/accelerate/deepspeed), perform [fully sharded data parallelism](https://huggingface.co/docs/accelerate/fsdp), and have automatic support for mixed-precision training! ", "from_author": false }, { "body": "```suggestion\r\nSome data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result, metrics\r\n```", "diff_hunk": "@@ -137,17 +137,22 @@ As for the training dataloader, passing your validation dataloader through\n [`~Accelerator.prepare`] may change it: if you run on X GPUs, it will have its length divided by X\n (since your actual batch size will be multiplied by X), unless you set `split_batches=True`.\n \n+</Tip>\n+\n Any instruction using your training dataloader length (for instance if you need the number of total training steps\n-to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].\n+to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`]. \n \n-As some data at the end of the dataset may be duplicated so the batch can divide equally to all workers, metrics should be \n-calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data.\n+Some data at the end of the dataset may be duplicated so the batch can divide equally to all workers. As a result metrics", "from_author": false }, { "body": "Should be un-commented before merging.", "diff_hunk": "@@ -1,6 +1,6 @@\n name: Run Tests\n \n-on: [pull_request]\n+# on: [pull_request]", "from_author": false }, { "body": "Installation should stay in Getting Started.", "diff_hunk": "@@ -1,38 +1,67 @@\n-- sections: \n+- sections:\n - local: index\n title: πŸ€— Accelerate\n - local: quicktour\n- title: Quick tour\n- - local: installation\n+ title: Quicktour of πŸ€— Accelerate\n+ title: Getting started\n+- sections:\n+ - local: basic_tutorials/overview\n+ title: Overview\n+ - local: basic_tutorials/install", "from_author": false }, { "body": "```suggestion\r\n title: Quicktour\r\n```\r\nI'm pretty sure the user won't expect a quicktour of another lib here ;-)", "diff_hunk": "@@ -1,38 +1,67 @@\n-- sections: \n+- sections:\n - local: index\n title: πŸ€— Accelerate\n - local: quicktour\n- title: Quick tour\n- - local: installation\n+ title: Quicktour of πŸ€— Accelerate", "from_author": false }, { "body": "That's an editable install, a source install is just installing from git and should be detailed here, before you speak about editable install for developping.", "diff_hunk": "@@ -0,0 +1,91 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Installation and Configuration\n+\n+Before you start, you will need to setup your environment, install the appropriate packages, and configure πŸ€— Accelerate. 
πŸ€— Accelerate is tested on **Python 3.7+**.\n+\n+## Installing πŸ€— Accelerate\n+\n+πŸ€— Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:\n+\n+### pip \n+\n+To install πŸ€— Accelerate from pypi, perform:\n+\n+```bash\n+pip install accelerate\n+```\n+\n+### conda\n+\n+πŸ€— Accelerate can also be installed with conda with:\n+\n+```bash\n+conda install -c conda-forge accelerate\n+```\n+\n+### Source\n+\n+New features are added every day that haven't been released yet. To try them out yourself, install\n+from the GitHub repository:\n+\n+```bash\n+git clone https://github.com/huggingface/accelerate\n+cd accelerate\n+pip install -e .\n+```", "from_author": false }, { "body": "You should insist here that running `accelerate config` beforehand is recommended.", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. ", "from_author": false }, { "body": "You should give a code example of this.", "diff_hunk": "@@ -0,0 +1,29 @@\n+# Deferring Executions\n+\n+When you run your usual script, instructions are executed in order. Using πŸ€— Accelerate to deploy your script on several\n+GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be\n+faster than others.\n+\n+You might need to wait for all processes to have reached a certain point before executing a given instruction. 
For\n+instance, you shouldn't save a model before being sure every process is done with training, and you wouldn't want to \n+continue training before all the model weights have been loaded in. To do this, just write the following line in your code:\n+\n+```\n+accelerator.wait_for_everyone()\n+```\n+\n+This instruction will block all the processes that arrive first until all the other processes have reached that\n+point (if you run your script on just one GPU or CPU, this won't do anything).\n+\n+A few example cases for when to use this utility are listed below:\n+\n+<Tip>\n+\n+Some of these are utilized with the [`Accelerator.main_process_first`] context manager, which utilizes [`Accelerator.wait_for_everyone`] to \n+run a particular set of code on the main process beforehand before triggering and launching the other processes\n+\n+</Tip>\n+\n+- Downloading a dataset on the main process first and then loading the cached dataset in afterwards", "from_author": false }, { "body": "Use 4 lines of indentation.", "diff_hunk": "@@ -12,121 +12,60 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n-\n-## Features\n-\n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and in any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run\n- seamlessly on your local machine for debugging or your training environment.\n-\n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment and\n- then launch the scripts.\n-\n-\n-## Easy to integrate\n-\n-A traditional training loop in PyTorch looks like this:\n-\n-```python\n-my_model.to(device)\n-\n-for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- loss.backward()\n- my_optimizer.step()\n-```\n-\n-Changing it to work with accelerate is really easy and only adds a few lines of code:\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! 
In short, training and inference at scale made simple, efficient and adaptable.\n \n ```diff\n + from accelerate import Accelerator\n-\n + accelerator = Accelerator()\n- # Use the device given by the *accelerator* object.\n-+ device = accelerator.device\n- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n-+ )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n-```\n \n-and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n-\n-You can even simplify your script a bit by letting πŸ€— Accelerate handle the device placement for you (which is safer,\n-especially for TPU training):\n-\n-```diff\n-+ from accelerate import Accelerator\n-\n-+ accelerator = Accelerator()\n-- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n++ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n + )\n \n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n-- inputs = inputs.to(device)\n-- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n++ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()", "from_author": false }, { "body": "I would show first the sample where we don't change the device as letting Accelerate handle device placement changes more lines of code.", "diff_hunk": "@@ -12,121 +12,60 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device.\n-\n-## Features\n-\n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and in any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. 
The same code can then run\n- seamlessly on your local machine for debugging or your training environment.\n-\n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment and\n- then launch the scripts.\n-\n-\n-## Easy to integrate\n-\n-A traditional training loop in PyTorch looks like this:\n-\n-```python\n-my_model.to(device)\n-\n-for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- loss.backward()\n- my_optimizer.step()\n-```\n-\n-Changing it to work with accelerate is really easy and only adds a few lines of code:\n+πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.\n \n ```diff\n + from accelerate import Accelerator\n-\n + accelerator = Accelerator()\n- # Use the device given by the *accelerator* object.\n-+ device = accelerator.device\n- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n-+ )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n-```\n \n-and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n-\n-You can even simplify your script a bit by letting πŸ€— Accelerate handle the device placement for you (which is safer,\n-especially for TPU training):\n-\n-```diff\n-+ from accelerate import Accelerator\n-\n-+ accelerator = Accelerator()\n-- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n-+ my_model, my_optimizer, my_training_dataloader\n++ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n++ model, optimizer, training_dataloader, scheduler\n + )\n \n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n-- inputs = inputs.to(device)\n-- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n-- loss.backward()\n-+ accelerator.backward(loss)\n- my_optimizer.step()\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)", "from_author": false }, { "body": "The old page had way more information, which we should keep.", "diff_hunk": "@@ -0,0 +1,17 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerator\n+\n+The main [`Accelerator`] class:", "from_author": false }, { "body": "````suggestion\r\n```python\r\n````\r\nWhy remove this? We lose all formatting after...", "diff_hunk": "@@ -24,8 +24,8 @@ which can total to adding just one new line of code!\n \n This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n \n-```python\n-device = \"cuda\"\n+```", "from_author": false }, { "body": "Please use 4 spaces to indent", "diff_hunk": "@@ -86,18 +88,18 @@ of steps to perform before each call to `step()` and how to automatically adjust\n ```\n \n From here you can use the [`Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!\n-You just wrap it around the entire training part of your code: \n+You just wrap it around the entire training part of our code: \n \n ```diff\n - for index, batch in enumerate(training_dataloader):\n + for batch in training_dataloader:\n + with accelerator.accumulate(model):\n- optimizer.zero_grad()\n- inputs, targets = batch\n- outputs = model(inputs)\n+ optimizer.zero_grad()", "from_author": false }, { "body": "Some content seems to be lost here as there is no usage guide.", "diff_hunk": "@@ -0,0 +1,19 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launchers\n+\n+Functions for launching training on distributed processes.", "from_author": false }, { "body": "```suggestion\r\nIn this case, πŸ€— Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision.\r\n```", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.\n+For example, here is how to use `accelerate launch` with a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+In this case, πŸ€— Accelerate will make some hyperparameter decisions for you, such as if a GPU is available, it will use all of them by default, and no mixed precision will be used. ", "from_author": false }, { "body": "```suggestion\r\nHere is how you would use all GPUs and train with mixed precision disabled:\r\n```", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.\n+For example, here is how to use `accelerate launch` with a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+In this case, πŸ€— Accelerate will make some hyperparameter decisions for you, such as if a GPU is available, it will use all of them by default, and no mixed precision will be used. \n+Here is how you would use all GPUs and train on no mixed precision:", "from_author": false }, { "body": "maybe specify that this is one machine with two GPUs?", "diff_hunk": "@@ -0,0 +1,91 @@\n+<!--Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Installation and Configuration\n+\n+Before you start, you will need to setup your environment, install the appropriate packages, and configure πŸ€— Accelerate. πŸ€— Accelerate is tested on **Python 3.7+**.\n+\n+## Installing πŸ€— Accelerate\n+\n+πŸ€— Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:\n+\n+### pip \n+\n+To install πŸ€— Accelerate from pypi, perform:\n+\n+```bash\n+pip install accelerate\n+```\n+\n+### conda\n+\n+πŸ€— Accelerate can also be installed with conda with:\n+\n+```bash\n+conda install -c conda-forge accelerate\n+```\n+\n+### Source\n+\n+New features are added every day that haven't been released yet. To try them out yourself, install\n+from the GitHub repository:\n+\n+```bash\n+git clone https://github.com/huggingface/accelerate\n+cd accelerate\n+pip install -e .\n+```\n+\n+## Configuring πŸ€— Accelerate\n+\n+After installing, you need to configure πŸ€— Accelerate for how the current system is setup for training. \n+To do so run the following and answer the questions prompted to you:\n+\n+```bash\n+accelerate config\n+```\n+\n+To write a barebones configuration that doesn't include options such as DeepSpeed configuration or running on TPUs, you can quickly run:\n+\n+```bash\n+python -c \"from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='fp16')\"\n+```\n+πŸ€— Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode.\n+\n+To check that your configuration looks fine, run:\n+\n+```bash\n+accelerate env\n+```\n+\n+An example output is shown below:", "from_author": false }, { "body": "```suggestion\r\nThis command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them are.\r\n```", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.", "from_author": false }, { "body": "```suggestion\r\nIf you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`.\r\n```", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`", "from_author": false }, { "body": "why curly brackets here? i think also something like that would be clear enough:\r\n```suggestion\r\naccelerate launch your_script.py --arg1 --arg2 ...\r\n```", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...", "from_author": false }, { "body": "what's the benefit of that? what happens if you start a random script with accelerate?", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`\n+\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.\n+For example, here is how to use `accelerate launch` with a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+In this case, πŸ€— Accelerate will make some hyperparameter decisions for you, such as if a GPU is available, it will use all of them by default, and no mixed precision will be used. \n+Here is how you would use all GPUs and train on no mixed precision:\n+```bash\n+accelerate launch --multi_gpu {script_name.py} --arg1 --arg2 ...\n+```\n+To get more specific you should pass in the needed parameters yourself. 
For instance, here is how you \n+would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings: \n+```bash\n+accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} --arg1 --arg2 ...\n+```\n+\n+For a complete list of parameters you can pass in, run:\n+```bash\n+accelerate launch -h\n+```\n+\n+<Tip>\n+\n+Even if you are not using πŸ€— Accelerate in your code, you can still use the launcher for starting your scripts!", "from_author": false }, { "body": "regarding path_to_script.py: maybe just be consistent with the above name used.", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. 
It is not required to use `accelerate launch`\n+\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.\n+For example, here is how to use `accelerate launch` with a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+In this case, πŸ€— Accelerate will make some hyperparameter decisions for you, such as if a GPU is available, it will use all of them by default, and no mixed precision will be used. \n+Here is how you would use all GPUs and train on no mixed precision:\n+```bash\n+accelerate launch --multi_gpu {script_name.py} --arg1 --arg2 ...\n+```\n+To get more specific you should pass in the needed parameters yourself. For instance, here is how you \n+would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings: \n+```bash\n+accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} --arg1 --arg2 ...\n+```\n+\n+For a complete list of parameters you can pass in, run:\n+```bash\n+accelerate launch -h\n+```\n+\n+<Tip>\n+\n+Even if you are not using πŸ€— Accelerate in your code, you can still use the launcher for starting your scripts!\n+\n+</Tip>\n+\n+## Custom Configurations\n+\n+As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations \n+made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for πŸ€— Accelerate. \n+This cache folder is located at (with decreasing order of priority):\n+\n+- The content of your environment variable `HF_HOME` suffixed with `accelerate`.\n+- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with\n+ `huggingface/accelerate`.\n+- If this does not exist either, the folder `~/.cache/huggingface/accelerate`.\n+\n+To have multiple configurations, the flag `--config_file` can be passed to the `accelerate launch` command paired \n+with the location of the custom yaml. \n+\n+An example yaml may look something like the following:\n+```yaml\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: MULTI_GPU\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 1\n+use_cpu: false\n+```\n+\n+An example of launching a script with a yaml located in a custom location would look something like so:\n+```bash\n+accelerate launch --config_file path/to/config/my_config_file.yaml path_to_script.py --args_for_the_script", "from_author": false }, { "body": "`prepare` makes the \"Setting the right device\" a bit obsolete no? maybe make a note when to use `prepare` or `model.to`", "diff_hunk": "@@ -0,0 +1,119 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Migrating your code to πŸ€— Accelerate\n+\n+This tutorial will detail how to easily convert existing PyTorch code to use πŸ€— Accelerate!\n+You'll see that by just changing a few lines of code, πŸ€— Accelerate can perform its magic and get you on \n+your way towards running your code on distributed systems with ease!\n+\n+## The base training loop\n+\n+To begin, write out a very basic PyTorch training loop. \n+\n+<Tip>\n+We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.\n+</Tip>\n+\n+```python\n+device = \"cuda\"\n+model.to(device)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ loss.backward()\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+## Add in πŸ€— Accelerate\n+\n+To start using πŸ€— Accelerate, first import and create an [`Accelerator`] instance:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!\n+\n+### Setting the right device\n+\n+The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should\n+change the definition of `device` to come from [`Accelerator`]:\n+\n+```diff\n+- device = 'cuda'\n++ device = accelerator.device\n+ model.to(device)\n+```\n+\n+### Preparing your objects\n+\n+Next you need to pass all of the important objects related to training into [`Accelerator.prepare`]. πŸ€— Accelerate will\n+make sure everything is setup in the current environment for you to start training:\n+\n+```\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+```\n+These objects are returned in the same order they were sent in with. By default, `device_placement=True`, so your model is automatically moved to the proper device.\n+performing this action will also automatically move your model to the proper device as well.", "from_author": false }, { "body": "agree, a few examples would be useful here. also a graphic could be useful.", "diff_hunk": "@@ -0,0 +1,29 @@\n+# Deferring Executions\n+\n+When you run your usual script, instructions are executed in order. Using πŸ€— Accelerate to deploy your script on several\n+GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be\n+faster than others.\n+\n+You might need to wait for all processes to have reached a certain point before executing a given instruction. For\n+instance, you shouldn't save a model before being sure every process is done with training, and you wouldn't want to \n+continue training before all the model weights have been loaded in. 
To do this, just write the following line in your code:\n+\n+```\n+accelerator.wait_for_everyone()\n+```\n+\n+This instruction will block all the processes that arrive first until all the other processes have reached that\n+point (if you run your script on just one GPU or CPU, this won't do anything).\n+\n+A few example cases for when to use this utility are listed below:\n+\n+<Tip>\n+\n+Some of these are utilized with the [`Accelerator.main_process_first`] context manager, which utilizes [`Accelerator.wait_for_everyone`] to \n+run a particular set of code on the main process beforehand before triggering and launching the other processes\n+\n+</Tip>\n+\n+- Downloading a dataset on the main process first and then loading the cached dataset in afterwards", "from_author": false }, { "body": "shouldn't you use the target to get a loss?\r\n```suggestion\r\n ddp_model(input, target).backward()\r\n```", "diff_hunk": "@@ -0,0 +1,109 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. \n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+ import torch.nn as nn\n+- from torch.nn.parallel import DistributedDataParallel\n+\n+ model = nn.Linear(10,10)\n++ model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. But how does this risk slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`. All instances of the model\n+need to have updated their gradients, collated, and updated again before moving onto the next batch of data. But when performing \n+gradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This \n+can cause a significant slowdown since all the processes need to communicate with them more times than needed. 
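The accumulation pattern debated in this review thread looks roughly like the single-process sketch below. It is a generic illustration (the toy model, loss, optimizer and data are all invented here, not taken from the PR), written with an explicit `(inputs, targets)` loss as the reviewers suggest; under DDP every intermediate `backward()` call is what forces the extra gradient synchronization the quoted text describes:

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-ins -- in the documentation quoted above these would come out of `accelerator.prepare(...)`.
model = nn.Linear(10, 10)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 10), torch.randn(32, 10))
dataloader = DataLoader(dataset, batch_size=8)

accumulation_steps = 2

for index, (inputs, targets) in enumerate(dataloader):
    outputs = model(inputs)
    # Explicit loss computed from (inputs, targets), as suggested in the review comments.
    loss = criterion(outputs, targets) / accumulation_steps
    # Under DDP, each of these backward() calls is what triggers a gradient all-reduce,
    # which is the communication overhead being discussed.
    loss.backward()
    if (index + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
```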
How \n+can you avoid this overhead?\n+\n+## Solving the slowdown problem\n+\n+Since you are skipping these batches, their gradients do not need to be synchronized until the point where `.backward()` is actually called. \n+PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager\n+that is added to your model after converting it to DDP.\n+\n+Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this \n+context manager will trigger the synchronization. See an example below:\n+```python\n+ddp_model, dataloader = accelerator.prepare(model, dataloader)\n+\n+for index, batch in enumerate(dataloader):\n+ input, target = batch\n+ # Trigger gradient synchronization on the last batch\n+ if index != (len(dataloader) - 1):\n+ with ddp_model.no_sync():\n+ # Gradients only accumulate\n+ ddp_model(input).backward()", "from_author": false }, { "body": "same as above\r\n```suggestion\r\n ddp_model(input, target).backward()\r\n```", "diff_hunk": "@@ -0,0 +1,109 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. \n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+ import torch.nn as nn\n+- from torch.nn.parallel import DistributedDataParallel\n+\n+ model = nn.Linear(10,10)\n++ model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. But how does this risk slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`. 
All instances of the model\n+need to have updated their gradients, collated, and updated again before moving onto the next batch of data. But when performing \n+gradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This \n+can cause a significant slowdown since all the processes need to communicate with them more times than needed. How \n+can you avoid this overhead?\n+\n+## Solving the slowdown problem\n+\n+Since you are skipping these batches, their gradients do not need to be synchronized until the point where `.backward()` is actually called. \n+PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager\n+that is added to your model after converting it to DDP.\n+\n+Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this \n+context manager will trigger the synchronization. See an example below:\n+```python\n+ddp_model, dataloader = accelerator.prepare(model, dataloader)\n+\n+for index, batch in enumerate(dataloader):\n+ input, target = batch\n+ # Trigger gradient synchronization on the last batch\n+ if index != (len(dataloader) - 1):\n+ with ddp_model.no_sync():\n+ # Gradients only accumulate\n+ ddp_model(input).backward()\n+ else:\n+ # Gradients finally sync\n+ ddp_model(input).backward()", "from_author": false }, { "body": "same as above", "diff_hunk": "@@ -0,0 +1,109 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. \n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+ import torch.nn as nn\n+- from torch.nn.parallel import DistributedDataParallel\n+\n+ model = nn.Linear(10,10)\n++ model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. 
But how does this risk slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`. All instances of the model\n+need to have updated their gradients, collated, and updated again before moving onto the next batch of data. But when performing \n+gradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This \n+can cause a significant slowdown since all the processes need to communicate with them more times than needed. How \n+can you avoid this overhead?\n+\n+## Solving the slowdown problem\n+\n+Since you are skipping these batches, their gradients do not need to be synchronized until the point where `.backward()` is actually called. \n+PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager\n+that is added to your model after converting it to DDP.\n+\n+Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this \n+context manager will trigger the synchronization. See an example below:\n+```python\n+ddp_model, dataloader = accelerator.prepare(model, dataloader)\n+\n+for index, batch in enumerate(dataloader):\n+ input, target = batch\n+ # Trigger gradient synchronization on the last batch\n+ if index != (len(dataloader) - 1):\n+ with ddp_model.no_sync():\n+ # Gradients only accumulate\n+ ddp_model(input).backward()\n+ else:\n+ # Gradients finally sync\n+ ddp_model(input).backward()\n+```\n+\n+In πŸ€— Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!),\n+`ddp_model.no_sync` gets replaced with [`Accelerator.no_sync`] and operates the same way:\n+\n+```diff\n+ ddp_model, dataloader = accelerator.prepare(model, dataloader)\n+\n+ for index, batch in enumerate(dataloader):\n+ input, target = batch\n+ # Trigger gradient synchronization on the last batch\n+ if index != (len(dataloader)-1):\n+- with ddp_model.no_sync():\n++ with accelerator.no_sync(model):\n+ # Gradients only accumulate\n+ ddp_model(input).backward()", "from_author": false }, { "body": "If you want to make it a bit more concise here, you could just show the right way here and just say: if you initialize the model inside the function this might happen <show error message and explanation>.", "diff_hunk": "@@ -0,0 +1,154 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Training on TPUs with πŸ€— Accelerate\n+\n+Training on TPUs can be slightly different than training on multi-gpu, even with πŸ€— Accelerate. This guide aims to show you \n+where you should be careful and why, as well as the best practices in general.\n+\n+## Training in a Notebook\n+\n+The main carepoint when training on TPUs comes from the `notebook_launcher`. As mentioned in the [notebook tutorial](https://huggingface.co/docs/accelerate/basic_tutorials/notebook), you need to \n+restructure your training code into a function that can get passed to the `notebook_launcher` function and be careful about not declaring any tensors on the GPU.\n+\n+While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**. \n+When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already \n+utilizing a python process, you need to *fork* a new process from it to launch your code. \n+\n+Where this becomes important is in regards to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your \n+training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead one \n+model instance is shared between all the nodes and it is passed back and forth. This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or\n+on Google Colaboratory. 
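For reference, the fork/spawn distinction discussed in this hunk can be observed with plain `multiprocessing`; the sketch below is a generic, standalone script (nothing TPU- or Accelerate-specific, and it assumes a Linux host where the `fork` start method is available):

```python
import multiprocessing as mp

BIG_OBJECT = list(range(1_000_000))  # stands in for a model built once in the parent process

def report(tag):
    # Under "fork" the child inherits the parent's memory, so it typically sees the very same
    # object (same id); under "spawn" the module is re-imported and the object is rebuilt.
    print(f"{tag}: id(BIG_OBJECT) = {id(BIG_OBJECT)}")

if __name__ == "__main__":
    print(f"parent: id(BIG_OBJECT) = {id(BIG_OBJECT)}")
    for method in ("fork", "spawn"):
        ctx = mp.get_context(method)
        proc = ctx.Process(target=report, args=(method,))
        proc.start()
        proc.join()
```

The forked child reports the parent's object id (inherited, copy-on-write memory), while the spawned child rebuilds `BIG_OBJECT` in a fresh interpreter; this is the mechanism behind the guide's recommendation to instantiate the model once and pass it into the training function.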
\n+\n+Below is an example of a training function passed to the `notebook_launcher` if training on CPUs or GPUs:\n+\n+<Tip>\n+\n+This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight \n+modifications for the sake of simplicity\n+\n+</Tip>\n+\n+```python\n+def training_function():\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+ train_dataloader, eval_dataloader = create_dataloaders(\n+ train_batch_size=hyperparameters[\"train_batch_size\"], eval_batch_size=hyperparameters[\"eval_batch_size\"]\n+ )\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=hyperparameters[\"learning_rate\"])\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader\n+ )\n+\n+ num_epochs = hyperparameters[\"num_epochs\"]\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ accelerator.backward(loss)\n+\n+ optimizer.step()\n+ optimizer.zero_grad()\n+```\n+\n+```python\n+from accelerate import notebook_launcher\n+\n+notebook_launcher(training_function)\n+```\n+\n+<Tip>\n+\n+The `notebook_launcher` will default to 8 processes if πŸ€— Accelerate has been configured for a TPU\n+\n+</Tip>\n+\n+If you use this example and declare the model *inside* the training loop, then on a low-resource system you will potentially see an error \n+like:\n+\n+```\n+ProcessExitedException: process 0 terminated with signal SIGSEGV\n+```\n+\n+This error is *extremely* cryptic but the basic explaination is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to \n+accept a single `model` argument, and declare it in an outside cell:\n+\n+```python\n+# In another Jupyter cell\n+model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+```\n+\n+```diff\n++ def training_function(model):\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+- model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+ train_dataloader, eval_dataloader = create_dataloaders(\n+ train_batch_size=hyperparameters[\"train_batch_size\"], eval_batch_size=hyperparameters[\"eval_batch_size\"]\n+ )\n+ ...\n+```\n+And finally calling the training function with:\n+\n+```diff\n+ from accelerate import notebook_launcher\n+- notebook_launcher(training_function)\n++ notebook_launcher(training_function, (model,))\n+```\n+", "from_author": false }, { "body": "can you maybe explain a bit what the difference to the first setting is and what the impact is and when you should use which?", "diff_hunk": "@@ -0,0 +1,154 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Training on TPUs with πŸ€— Accelerate\n+\n+Training on TPUs can be slightly different than training on multi-gpu, even with πŸ€— Accelerate. This guide aims to show you \n+where you should be careful and why, as well as the best practices in general.\n+\n+## Training in a Notebook\n+\n+The main carepoint when training on TPUs comes from the `notebook_launcher`. As mentioned in the [notebook tutorial](https://huggingface.co/docs/accelerate/basic_tutorials/notebook), you need to \n+restructure your training code into a function that can get passed to the `notebook_launcher` function and be careful about not declaring any tensors on the GPU.\n+\n+While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**. \n+When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already \n+utilizing a python process, you need to *fork* a new process from it to launch your code. \n+\n+Where this becomes important is in regards to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your \n+training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead one \n+model instance is shared between all the nodes and it is passed back and forth. This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or\n+on Google Colaboratory. 
\n+\n+Below is an example of a training function passed to the `notebook_launcher` if training on CPUs or GPUs:\n+\n+<Tip>\n+\n+This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight \n+modifications for the sake of simplicity\n+\n+</Tip>\n+\n+```python\n+def training_function():\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+ train_dataloader, eval_dataloader = create_dataloaders(\n+ train_batch_size=hyperparameters[\"train_batch_size\"], eval_batch_size=hyperparameters[\"eval_batch_size\"]\n+ )\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=hyperparameters[\"learning_rate\"])\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader\n+ )\n+\n+ num_epochs = hyperparameters[\"num_epochs\"]\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ accelerator.backward(loss)\n+\n+ optimizer.step()\n+ optimizer.zero_grad()\n+```\n+\n+```python\n+from accelerate import notebook_launcher\n+\n+notebook_launcher(training_function)\n+```\n+\n+<Tip>\n+\n+The `notebook_launcher` will default to 8 processes if πŸ€— Accelerate has been configured for a TPU\n+\n+</Tip>\n+\n+If you use this example and declare the model *inside* the training loop, then on a low-resource system you will potentially see an error \n+like:\n+\n+```\n+ProcessExitedException: process 0 terminated with signal SIGSEGV\n+```\n+\n+This error is *extremely* cryptic but the basic explaination is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to \n+accept a single `model` argument, and declare it in an outside cell:\n+\n+```python\n+# In another Jupyter cell\n+model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+```\n+\n+```diff\n++ def training_function(model):\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+- model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+ train_dataloader, eval_dataloader = create_dataloaders(\n+ train_batch_size=hyperparameters[\"train_batch_size\"], eval_batch_size=hyperparameters[\"eval_batch_size\"]\n+ )\n+ ...\n+```\n+And finally calling the training function with:\n+\n+```diff\n+ from accelerate import notebook_launcher\n+- notebook_launcher(training_function)\n++ notebook_launcher(training_function, (model,))\n+```\n+\n+## Mixed Precision and Global Variables \n+\n+As mentioned in the [mixed precision tutorial](https://huggingface.co/docs/accelerate/usage_guides/mixed_precision), πŸ€— Accelerate supports fp16 and bf16, both of which can be used on TPUs.\n+That being said, ideally `bf16` should be utilized as it is extremely efficient to use.\n+\n+There are two \"layers\" when using `bf16` and πŸ€— Accelerate on TPUs, at the base level and at the operation level. 
\n+\n+At the base level, this is enabled when passing `mixed_precision=\"bf16\"` to `Accelerator`, such as:\n+```python\n+accelerator = Accelerator(mixed_precision=\"bf16\")\n+```\n+By default this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs. \n+The specific configuration being set is an environmental variable of `XLA_USE_BF16` is set to `1`.\n+\n+There is a futher configuration you can perform which is setting the `XLA_DOWNCAST_BF16` environmental variable. If set to `1`, then \n+`torch.float` is `bfloat16` and `torch.double` is `float32`.", "from_author": false }, { "body": "It will remove the boilerplate needed when calling `torchrun` XYZ. I can expand this further to point it out, by translating the earlier bash with accelerate launch to show the equivalence. ", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. 
It is not required to use `accelerate launch`\n+\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!\n+\n+Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.\n+For example, here is how to use `accelerate launch` with a single GPU:\n+```bash\n+CUDA_VISIBLE_DEVICES=\"0\" accelerate launch {script_name.py} --arg1 --arg2 ...\n+```\n+\n+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.\n+In this case, πŸ€— Accelerate will make some hyperparameter decisions for you, such as if a GPU is available, it will use all of them by default, and no mixed precision will be used. \n+Here is how you would use all GPUs and train on no mixed precision:\n+```bash\n+accelerate launch --multi_gpu {script_name.py} --arg1 --arg2 ...\n+```\n+To get more specific you should pass in the needed parameters yourself. For instance, here is how you \n+would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings: \n+```bash\n+accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} --arg1 --arg2 ...\n+```\n+\n+For a complete list of parameters you can pass in, run:\n+```bash\n+accelerate launch -h\n+```\n+\n+<Tip>\n+\n+Even if you are not using πŸ€— Accelerate in your code, you can still use the launcher for starting your scripts!", "from_author": true }, { "body": "I skipped over calculating the loss in this case, to simply show calling `.backward()`. but I'll include a loss calculation to make it a bit more clear", "diff_hunk": "@@ -0,0 +1,109 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Gradient Synchronization\n+\n+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.\n+This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints\n+when using the `ddp` module. \n+\n+These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. 
\n+This happens when the model is wrapped with `DistributedDataParallel`:\n+```python\n+import torch.nn as nn\n+from torch.nn.parallel import DistributedDataParallel\n+\n+model = nn.Linear(10, 10)\n+ddp_model = DistributedDataParallel(model)\n+```\n+In πŸ€— Accelerate this conversion happens automatically when calling [`Accelerator.prepare`] and passing in your model.\n+\n+```diff\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+ import torch.nn as nn\n+- from torch.nn.parallel import DistributedDataParallel\n+\n+ model = nn.Linear(10,10)\n++ model = accelerator.prepare(model)\n+```\n+\n+## The slowdown in gradient accumulation\n+\n+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when \n+training in a distributed setup. But how does this risk slowing down your code?\n+\n+In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected\n+at specific points and these must also occur at roughly the same time before moving on.\n+\n+The most direct example is when you update all of the parameters in a model through `.backward()`. All instances of the model\n+need to have updated their gradients, collated, and updated again before moving onto the next batch of data. But when performing \n+gradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This \n+can cause a significant slowdown since all the processes need to communicate with them more times than needed. How \n+can you avoid this overhead?\n+\n+## Solving the slowdown problem\n+\n+Since you are skipping these batches, their gradients do not need to be synchronized until the point where `.backward()` is actually called. \n+PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager\n+that is added to your model after converting it to DDP.\n+\n+Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this \n+context manager will trigger the synchronization. See an example below:\n+```python\n+ddp_model, dataloader = accelerator.prepare(model, dataloader)\n+\n+for index, batch in enumerate(dataloader):\n+ input, target = batch\n+ # Trigger gradient synchronization on the last batch\n+ if index != (len(dataloader) - 1):\n+ with ddp_model.no_sync():\n+ # Gradients only accumulate\n+ ddp_model(input).backward()", "from_author": true }, { "body": "I made it less concise since one example shows running a regular script vs this one is for the tpu launcher in a notebook, but will think on this a bit more", "diff_hunk": "@@ -0,0 +1,154 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Training on TPUs with πŸ€— Accelerate\n+\n+Training on TPUs can be slightly different than training on multi-gpu, even with πŸ€— Accelerate. This guide aims to show you \n+where you should be careful and why, as well as the best practices in general.\n+\n+## Training in a Notebook\n+\n+The main carepoint when training on TPUs comes from the `notebook_launcher`. As mentioned in the [notebook tutorial](https://huggingface.co/docs/accelerate/basic_tutorials/notebook), you need to \n+restructure your training code into a function that can get passed to the `notebook_launcher` function and be careful about not declaring any tensors on the GPU.\n+\n+While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**. \n+When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already \n+utilizing a python process, you need to *fork* a new process from it to launch your code. \n+\n+Where this becomes important is in regards to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your \n+training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead one \n+model instance is shared between all the nodes and it is passed back and forth. This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or\n+on Google Colaboratory. 
\n+\n+Below is an example of a training function passed to the `notebook_launcher` if training on CPUs or GPUs:\n+\n+<Tip>\n+\n+This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight \n+modifications for the sake of simplicity\n+\n+</Tip>\n+\n+```python\n+def training_function():\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+ train_dataloader, eval_dataloader = create_dataloaders(\n+ train_batch_size=hyperparameters[\"train_batch_size\"], eval_batch_size=hyperparameters[\"eval_batch_size\"]\n+ )\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=hyperparameters[\"learning_rate\"])\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader\n+ )\n+\n+ num_epochs = hyperparameters[\"num_epochs\"]\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ accelerator.backward(loss)\n+\n+ optimizer.step()\n+ optimizer.zero_grad()\n+```\n+\n+```python\n+from accelerate import notebook_launcher\n+\n+notebook_launcher(training_function)\n+```\n+\n+<Tip>\n+\n+The `notebook_launcher` will default to 8 processes if πŸ€— Accelerate has been configured for a TPU\n+\n+</Tip>\n+\n+If you use this example and declare the model *inside* the training loop, then on a low-resource system you will potentially see an error \n+like:\n+\n+```\n+ProcessExitedException: process 0 terminated with signal SIGSEGV\n+```\n+\n+This error is *extremely* cryptic but the basic explaination is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to \n+accept a single `model` argument, and declare it in an outside cell:\n+\n+```python\n+# In another Jupyter cell\n+model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+```\n+\n+```diff\n++ def training_function(model):\n+ # Initialize accelerator\n+ accelerator = Accelerator()\n+- model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", num_labels=2)\n+ train_dataloader, eval_dataloader = create_dataloaders(\n+ train_batch_size=hyperparameters[\"train_batch_size\"], eval_batch_size=hyperparameters[\"eval_batch_size\"]\n+ )\n+ ...\n+```\n+And finally calling the training function with:\n+\n+```diff\n+ from accelerate import notebook_launcher\n+- notebook_launcher(training_function)\n++ notebook_launcher(training_function, (model,))\n+```\n+", "from_author": true }, { "body": "Thanks for pointing this out! Will make sure to have a `notebook.mdx` as part of the `basic_tutorials` before merging", "diff_hunk": "@@ -0,0 +1,19 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launchers\n+\n+Functions for launching training on distributed processes.", "from_author": true }, { "body": "Accident πŸ˜“ ", "diff_hunk": "@@ -24,8 +24,8 @@ which can total to adding just one new line of code!\n \n This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:\n \n-```python\n-device = \"cuda\"\n+```", "from_author": true }, { "body": "With curly brackets the reader knows right away that it's meant to represent templating, though I didn't propagate this well enough through the docs so will make sure to do so", "diff_hunk": "@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Launching your πŸ€— Accelerate scripts\n+\n+In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate.\n+The final version of that code is shown below:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+\n+model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+)\n+\n+for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+```\n+\n+But how do you run this code and have it utilize the special hardware available to it?\n+\n+First you should rewrite the above code into a function, and make it callable as a script. For example:\n+\n+```diff\n+ from accelerate import Accelerator\n+ \n++ def main():\n+\n+ accelerator = Accelerator()\n+\n+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(\n+ model, optimizer, training_dataloader, scheduler\n+ )\n+\n+ for batch in training_dataloader:\n+ optimizer.zero_grad()\n+ inputs, targets = batch\n+ outputs = model(inputs)\n+ loss = loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+\n++ if __name__ == \"__main__\":\n++ main()\n+```\n+\n+Next you need to launch it with `accelerate launch`. \n+\n+## Using accelerate launch\n+\n+πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.\n+This command wraps around all of the different commands needed to launch your script on various platforms, without you\n+having to remember what each of them are.\n+\n+<Tip>\n+\n+If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. 
It is not required to use `accelerate launch`\n+\n+</Tip>\n+\n+You can launch your script quickly by using:\n+```bash\n+accelerate launch {script_name.py} --arg1 --arg2 ...", "from_author": true }, { "body": "Move the file back to the root maybe? It would be more consistent.", "diff_hunk": "@@ -1,38 +1,68 @@\n-- sections: \n+- sections:\n - local: index\n title: πŸ€— Accelerate\n- - local: quicktour\n- title: Quick tour\n- - local: installation\n+ - local: basic_tutorials/install", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@pacman100 the difference between the two is something like the following:\r\n- How-To guides are how do you do things with **Accelerate**, e.g. how to perform gradient accumulation, how to perform logging, how to perform tracking (think our example scripts!)\r\n- Concepts and Fundamentals is given this thing accelerate does, here's an understanding of some of the harder-to-grasp magic it does underneath that you should know more about as you become more familiar with the library. E.g. Gradient synchronization is how we do gradient accumulation, so that's one example where we took an applied concept taught in the higher-level tutorial and took it to a lower level.\r\n\r\n- Re: example scripts, it'll be in this PR eventually, if not a follow up. Still thinking on a good design for integration. Right now this is a very tl;dr version to make sure everyone is aligned (which we are!) before getting too deep, and now I'll move towards migrating the full docs in (avoiding your reference issues, etc, etc)", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/495", "pr_id": 990570274 }, { "diff": "diff --git a/examples/requirements.txt b/examples/requirements.txt\nindex 65b6b04c3..912986bf1 100644\n--- a/examples/requirements.txt\n+++ b/examples/requirements.txt\n@@ -1 +1,3 @@\n-accelerate # used to be installed in Amazon SageMaker environment\n\\ No newline at end of file\n+accelerate # used to be installed in Amazon SageMaker environment\n+evaluate\n+datasets==2.3.2\n\\ No newline at end of file\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex fcb527aeb..fe3d05bf3 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -23,6 +23,7 @@\n import yaml\n \n from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION\n \n \n hf_cache_home = os.path.expanduser(\n@@ -123,7 +124,10 @@ def __post_init__(self):\n if isinstance(self.compute_environment, str):\n self.compute_environment = ComputeEnvironment(self.compute_environment)\n if isinstance(self.distributed_type, str):\n- self.distributed_type = DistributedType(self.distributed_type)\n+ if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\n+ self.distributed_type = SageMakerDistributedType(self.distributed_type)\n+ else:\n+ self.distributed_type = DistributedType(self.distributed_type)\n \n \n @dataclass\n@@ -156,5 +160,6 @@ class SageMakerConfig(BaseConfig):\n region: str = \"us-east-1\"\n num_machines: int = 1\n base_job_name: str = f\"accelerate-sagemaker-{num_machines}\"\n- pytorch_version: str = \"1.6\"\n- transformers_version: str = \"4.4\"\n+ pytorch_version: str = 
SAGEMAKER_PYTORCH_VERSION\n+ transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION\n+ py_version: str = SAGEMAKER_PYTHON_VERSION\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex f54659548..c0b17bc88 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -16,6 +16,7 @@\n import json\n import os\n \n+from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES\n from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType\n from ...utils.imports import is_boto3_available\n from .config_args import SageMakerConfig\n@@ -120,23 +121,31 @@ def get_sagemaker_input():\n _create_iam_role_for_sagemaker(iam_role_name)\n \n distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] data parallelism, [2] model parallelism): \",\n+ \"Which type of machine are you using? ([0] No distributed training, [1] data parallelism): \",\n _convert_sagemaker_distributed_mode,\n- error_message=\"Please enter 0, 1 or 2\",\n+ error_message=\"Please enter 0 or 1\",\n )\n \n- # using the best two instances for single-gpu training or multi-gpu -> can turn into question to make it more diverse\n- ec2_instance_type = \"ml.p3.2xlarge\" if distributed_type == SageMakerDistributedType.NO else \"ml.p3dn.24xlarge\"\n+ ec2_instance_query = \"Which EC2 instance type you want to use for your training \"\n+ if distributed_type != SageMakerDistributedType.NO:\n+ ec2_instance_query += \"(\"\n+ for i, instance_type in enumerate(SAGEMAKER_PARALLEL_EC2_INSTANCES):\n+ ec2_instance_query += f\"[{i}] {instance_type}, \"\n+ ec2_instance_query = ec2_instance_query[:-2] + \")? [0]: \"\n+ ec2_instance_type = _ask_field(ec2_instance_query, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])\n+ else:\n+ ec2_instance_query += \"? [ml.p3.2xlarge]:\"\n+ ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default=\"ml.p3.2xlarge\")\n+\n num_machines = 1\n if (\n distributed_type == SageMakerDistributedType.DATA_PARALLEL\n or distributed_type == SageMakerDistributedType.MODEL_PARALLEL\n ):\n- raise NotImplementedError(\"Model or Data Parallelism is not implemented yet. We are working on it\")\n num_machines = _ask_field(\n- \"How many machines do you want use? [2]: \",\n+ \"How many machines do you want use? 
[1]: \",\n lambda x: int(x),\n- default=2,\n+ default=1,\n )\n \n mixed_precision = _ask_field(\n@@ -148,6 +157,7 @@ def get_sagemaker_input():\n return SageMakerConfig(\n compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,\n distributed_type=distributed_type,\n+ use_cpu=False,\n ec2_instance_type=ec2_instance_type,\n profile=aws_profile,\n region=aws_region,\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 2bc6fbe4d..421bfe064 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -35,6 +35,7 @@\n is_deepspeed_available,\n is_sagemaker_available,\n )\n+from accelerate.utils.dataclasses import SageMakerDistributedType\n \n \n def launch_command_parser(subparsers=None):\n@@ -486,9 +487,15 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n mixed_precision = \"fp16\"\n \n # Environment variables to be set for use during training job\n- environment = {\"MIXED_PRECISION\": str(mixed_precision)}\n+ environment = {\n+ \"USE_SAGEMAKER\": \"true\",\n+ \"MIXED_PRECISION\": str(mixed_precision),\n+ \"SAGEMAKER_DISTRIBUTED_TYPE\": sagemaker_config.distributed_type.value,\n+ }\n # configure distribution set up\n distribution = None # TODO: not yet implemented\n+ if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL:\n+ distribution = {\"smdistributed\": {\"dataparallel\": {\"enabled\": True}}}\n \n # configure session\n print(\"Creating Estimator\")\n@@ -496,9 +503,9 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n entry_point=entry_point,\n source_dir=source_dir,\n role=sagemaker_config.iam_role_name,\n- transformers_version=\"4.4\",\n- pytorch_version=\"1.6\",\n- py_version=\"py36\",\n+ transformers_version=sagemaker_config.transformers_version,\n+ pytorch_version=sagemaker_config.pytorch_version,\n+ py_version=sagemaker_config.py_version,\n base_job_name=sagemaker_config.base_job_name,\n instance_count=sagemaker_config.num_machines,\n instance_type=sagemaker_config.ec2_instance_type,\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 330830eb5..4482341f3 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -18,6 +18,7 @@\n import torch\n \n from .utils import DistributedType, is_ccl_available, is_deepspeed_available, is_tpu_available\n+from .utils.dataclasses import SageMakerDistributedType\n \n \n if is_tpu_available(check_device=False):\n@@ -89,7 +90,25 @@ def __init__(\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\n \"before using any functionality from the `accelerate` library.\"\n )\n- if is_tpu_available() and not cpu:\n+ if (\n+ os.environ.get(\"USE_SAGEMAKER\", \"false\") == \"true\"\n+ and os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") != SageMakerDistributedType.NO\n+ and not cpu\n+ ):\n+ if os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") == SageMakerDistributedType.DATA_PARALLEL:\n+ self.distributed_type = DistributedType.MULTI_GPU\n+ import smdistributed.dataparallel.torch.torch_smddp # noqa\n+\n+ if not torch.distributed.is_initialized():\n+ torch.distributed.init_process_group(backend=\"smddp\")\n+ self.backend = \"smddp\"\n+ self.num_processes = torch.distributed.get_world_size()\n+ self.process_index = torch.distributed.get_rank()\n+ self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n+ self.device = torch.device(\"cuda\", self.local_process_index)\n+ torch.cuda.set_device(self.device)\n+ self.mixed_precision = mixed_precision\n+ 
elif is_tpu_available() and not cpu:\n self.distributed_type = DistributedType.TPU\n self.num_processes = xm.xrt_world_size()\n self.process_index = xm.get_ordinal()\ndiff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\nindex af8a6bf35..173cfa790 100644\n--- a/src/accelerate/utils/constants.py\n+++ b/src/accelerate/utils/constants.py\n@@ -20,5 +20,9 @@\n RNG_STATE_NAME = \"random_states\"\n OPTIMIZER_NAME = \"optimizer\"\n SCHEDULER_NAME = \"scheduler\"\n+SAGEMAKER_PYTORCH_VERSION = \"1.10.2\"\n+SAGEMAKER_PYTHON_VERSION = \"py38\"\n+SAGEMAKER_TRANSFORMERS_VERSION = \"4.17.0\"\n+SAGEMAKER_PARALLEL_EC2_INSTANCES = [\"ml.p3.16xlarge\", \"ml.p3dn.24xlarge\", \"ml.p4dn.24xlarge\"]\n \n STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\n", "code_comments": [ { "body": "Would remove the choice and error message for now as I don't think we will add this any time soon :-)", "diff_hunk": "@@ -89,7 +90,27 @@ def __init__(\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\n \"before using any functionality from the `accelerate` library.\"\n )\n- if is_tpu_available() and not cpu:\n+ if (\n+ os.environ.get(\"USE_SAGEMAKER\", \"false\") == \"true\"\n+ and os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") != SageMakerDistributedType.NO\n+ and not cpu\n+ ):\n+ if os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") == SageMakerDistributedType.DATA_PARALLEL:\n+ self.distributed_type = DistributedType.MULTI_GPU\n+ import smdistributed.dataparallel.torch.torch_smddp # noqa\n+\n+ if not torch.distributed.is_initialized():\n+ torch.distributed.init_process_group(backend=\"smddp\")\n+ self.backend = \"smddp\"\n+ self.num_processes = torch.distributed.get_world_size()\n+ self.process_index = torch.distributed.get_rank()\n+ self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n+ self.device = torch.device(\"cuda\", self.local_process_index)\n+ torch.cuda.set_device(self.device)\n+ self.mixed_precision = mixed_precision\n+ elif os.environ.get(\"SAGEMAKER_DISTRIBUTED_TYPE\") == SageMakerDistributedType.MODEL_PARALLEL:\n+ raise NotImplementedError(\"Model Parallelism is not implemented yet. We are working on it\")", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "I will test this with my use case as soon as it gets merged into `main`", "from_author": false }, { "body": "> LGTM! How should we maintain the `SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION` versions once we release new DLCs?\r\n\r\nHello, as it is not a frequent change, it can be done manually for time being whenever new releases of DLC happen. Any suggestions/best practices for automating it? 
", "from_author": true }, { "body": "Hello @pacman100, what is the sagemaker SDK estimator supposed to be used with accelerate?", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/494", "pr_id": 990448495 }, { "diff": "diff --git a/docs/source/internal.mdx b/docs/source/internal.mdx\nindex 8da23bee8..4ff1d6ff9 100644\n--- a/docs/source/internal.mdx\n+++ b/docs/source/internal.mdx\n@@ -26,7 +26,7 @@ The main work on your PyTorch `DataLoader` is done by the following function:\n \n [[autodoc]] data_loader.prepare_data_loader\n \n-### BatchSamplerShard\n+### DataLoaderShard\n \n [[autodoc]] data_loader.DataLoaderShard\n \ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 524034eb1..f58752e53 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -38,6 +38,22 @@\n if is_tpu_available(check_device=False):\n import torch_xla.distributed.parallel_loader as xpl\n \n+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):\n+ \"\"\"\n+ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size.\n+\n+ **Available attributes:**\n+\n+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n+ number of processes\n+ \"\"\"\n+\n+ @property\n+ def total_batch_size(self):\n+ return self._loader.total_batch_size\n+\n+\n logger = get_logger(__name__)\n \n # kwargs of the DataLoader in min version 1.4.0.\n@@ -289,6 +305,12 @@ class DataLoaderShard(DataLoader):\n A random number generator to keep synchronized across processes.\n kwargs:\n All other keyword arguments to pass to the regular `DataLoader` initialization.\n+\n+ **Available attributes:**\n+\n+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n+ number of processes\n \"\"\"\n \n def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwargs):\n@@ -321,6 +343,14 @@ def __iter__(self):\n yield current_batch\n break\n \n+ @property\n+ def total_batch_size(self):\n+ return (\n+ self.batch_sampler.batch_size\n+ if self.batch_sampler.split_batches\n+ else (self.batch_sampler.batch_size * self.batch_sampler.num_processes)\n+ )\n+\n \n class DataLoaderDispatcher(DataLoader):\n \"\"\"\n@@ -334,6 +364,12 @@ class DataLoaderDispatcher(DataLoader):\n the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial\n `dataloader` multiplied by `num_processes` otherwise. 
Setting this option to `True` requires that the batch\n size of the `dataloader` is a round multiple of `batch_size`.\n+\n+ **Available attributes:**\n+\n+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n+ number of processes\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n@@ -432,6 +468,12 @@ def __len__(self):\n else:\n return math.ceil(whole_length / self.state.num_processes)\n \n+ @property\n+ def total_batch_size(self):\n+ return (\n+ self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)\n+ )\n+\n \n def prepare_data_loader(\n dataloader: DataLoader,\n@@ -577,7 +619,10 @@ def prepare_data_loader(\n \n if dispatch_batches:\n dataloader = DataLoaderDispatcher(\n- new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, **kwargs\n+ new_dataset,\n+ split_batches=split_batches,\n+ batch_sampler=new_batch_sampler,\n+ **kwargs,\n )\n else:\n dataloader = DataLoaderShard(\n@@ -590,5 +635,5 @@ def prepare_data_loader(\n )\n \n if state.distributed_type == DistributedType.TPU:\n- return xpl.MpDeviceLoader(dataloader, device)\n+ return MpDeviceLoaderWrapper(dataloader, device)\n return dataloader\n", "code_comments": [ { "body": "Here we should probably define a `property for `total_batch_size` that goes read it in the `loader`. It's not something that should be settable anyway.", "diff_hunk": "@@ -38,6 +38,12 @@\n if is_tpu_available(check_device=False):\n import torch_xla.distributed.parallel_loader as xpl\n \n+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):\n+ def __init__(self, loader, device, **kwargs):\n+ super().__init__(loader, device, **kwargs)\n+ self.total_batch_size = loader.total_batch_size", "from_author": false }, { "body": "Same comments here except I think `total_batch_size` can be computed in a property since we have access to the `batch_size` and `split_batches`.", "diff_hunk": "@@ -336,7 +343,7 @@ class DataLoaderDispatcher(DataLoader):\n size of the `dataloader` is a round multiple of `batch_size`.\n \"\"\"\n \n- def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ def __init__(self, dataset, split_batches: bool = False, total_batch_size=None, **kwargs):", "from_author": false }, { "body": "```suggestion\r\n```\r\nCan be removed entirely ;-)", "diff_hunk": "@@ -38,6 +38,15 @@\n if is_tpu_available(check_device=False):\n import torch_xla.distributed.parallel_loader as xpl\n \n+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):\n+ def __init__(self, loader, device, **kwargs):\n+ super().__init__(loader, device, **kwargs)", "from_author": false }, { "body": "Let's add a docstring to the class here stating briefly what it is and the new property, mimicking how [Accelerator](https://github.com/huggingface/accelerate/blob/docs-revamp/src/accelerate/accelerator.py#L140-L152) has it, since this will be useful for folks and it should show in the documentation as it's different from the base torch dataloader.\r\n\r\nSimilarly add this property doc to the other two classes as well.", "diff_hunk": "@@ -38,6 +38,12 @@\n if is_tpu_available(check_device=False):\n import torch_xla.distributed.parallel_loader as xpl\n \n+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):\n+ @property\n+ def total_batch_size(self):\n+ return self._loader.total_batch_size", "from_author": false }, { "body": "```suggestion\r\n \"\"\"\r\n 
Wrapper for the xpl.MpDeviceLoader class that knows the total batch size. \r\n **Available attributes:**\r\n - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True` otherwise the original batch size * the total number of processes\r\n \"\"\"\r\n```\r\n\r\nSince property documentation doesn't get automatically documented when we do autodoc, it's better to include it in the main class docstring instead. ", "diff_hunk": "@@ -39,8 +39,18 @@\n import torch_xla.distributed.parallel_loader as xpl\n \n class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):\n+ \"\"\"\n+ Wrapper for the xpl.MpDeviceLoader class. This class is used to add `total_batch_size` property to the\n+ xpl.MpDeviceLoader class.\n+ \"\"\"\n+", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/493", "pr_id": 990158735 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 7e99c76ef..7f04c719a 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -977,7 +977,7 @@ def save_state(self, output_dir: str):\n output_dir = os.path.expanduser(output_dir)\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving current state to {output_dir}\")\n- weights = [self.get_state_dict(m) for m in self._models]\n+ weights = [self.get_state_dict(m, unwrap=False) for m in self._models]\n save_location = save_accelerator_state(\n output_dir, weights, self._optimizers, self._schedulers, self.state.process_index, self.scaler\n )\n@@ -1056,7 +1056,7 @@ def _get_devices(self, *args):\n break\n return (model_device, optimizer_device)\n \n- def get_state_dict(self, model):\n+ def get_state_dict(self, model, unwrap=True):\n is_zero_3 = False\n if self.distributed_type == DistributedType.DEEPSPEED:\n is_zero_3 = self.deepspeed_config[\"zero_optimization\"][\"stage\"] == 3\n@@ -1072,7 +1072,8 @@ def get_state_dict(self, model):\n \"To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights.\"\n )\n else:\n- model = self.unwrap_model(model)\n+ if unwrap:\n+ model = self.unwrap_model(model)\n state_dict = model.state_dict()\n \n if state_dict is not None:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Since this went unnoticed, might be good to add in a deepspeed test to the checkpointing test available here: https://github.com/huggingface/accelerate/blob/main/tests/test_state_checkpointing.py\r\n\r\nIf you think you can manage that feel free to add it to this PR, worst case I can add it in as a follow up", "from_author": false }, { "body": "Thanks! 
I think I can add a test tomorrow.", "from_author": true }, { "body": "Hi, although this solves the model weights issue, but now it fails when loading optimizer states.\r\n\r\nBecause deepspeed `optimizer.state_dict()` returns a dictionary.\r\nhttps://github.com/microsoft/DeepSpeed/blob/9305916d6b858b20fca3ebe03940ceaaa13cdfbd/deepspeed/runtime/zero/stage_1_and_2.py#L1998\r\n\r\nBut deepspeed `optimizer.load_state_dict()` expects a list of dictionary.\r\nhttps://github.com/microsoft/DeepSpeed/blob/9305916d6b858b20fca3ebe03940ceaaa13cdfbd/deepspeed/runtime/zero/stage_1_and_2.py#L2135-L2167\r\nbecause deepspeed `load_checkpoint` actually gets multiple state_dict (one for each partition or something) https://github.com/microsoft/DeepSpeed/blob/9305916d6b858b20fca3ebe03940ceaaa13cdfbd/deepspeed/runtime/engine.py#L2650\r\n\r\nSo I think I will just use this\r\n```\r\nif accelerator.distributed_type == DistributedType.DEEPSPEED:\r\n model.load_checkpoint(path)\r\n model.save_checkpoint(path)\r\nelse:\r\n accelerator.load_state(path)\r\n accelerator.save_state(path)\r\n```", "from_author": true }, { "body": "Since the test I tried to add fails when loading optimizer, I won't push it here. Please feel free to merge or close this PR.", "from_author": true }, { "body": "Let's merge this first fix (thanks a lot!). Cc @pacman100 for the second test.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/489", "pr_id": 988789537 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 8e3b9e130..09199154a 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -110,10 +110,9 @@ def training_function(config, args):\n batch_size = int(config[\"batch_size\"])\n \n # We need to initialize the trackers we use, and also store our configuration\n- if args.with_tracking:\n- if accelerator.is_main_process:\n- experiment_config = vars(args)\n- accelerator.init_trackers(\"fsdp_glue_no_trainer\", experiment_config)\n+ if args.with_tracking and accelerator.is_main_process:\n+ experiment_config = vars(args)\n+ accelerator.init_trackers(\"fsdp_glue_no_trainer\", experiment_config)\n \n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n datasets = load_dataset(\"glue\", \"mrpc\")\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 78d4af422..00c8665dd 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -162,10 +162,9 @@ def training_function(config, args):\n \n # New Code #\n # We need to initalize the trackers we use. 
Overall configurations can also be stored\n- if args.with_tracking:\n- if accelerator.is_main_process:\n- run = os.path.split(__file__)[-1].split(\".\")[0]\n- accelerator.init_trackers(run, config)\n+ if args.with_tracking and accelerator.is_main_process:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ accelerator.init_trackers(run, config)\n \n # Now we train the model\n for epoch in range(num_epochs):\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 8f893b7c4..17cc83170 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -103,12 +103,11 @@ def training_function(config, args):\n checkpointing_steps = None\n \n # We need to initialize the trackers we use, and also store our configuration\n- if args.with_tracking:\n- if accelerator.is_main_process:\n- run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n- accelerator.init_trackers(run, config)\n+ if args.with_tracking and accelerator.is_main_process:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n # Grab all the image filenames\n file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(\".jpg\")]\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 572bc9a23..dc0cf43ed 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -75,12 +75,11 @@ def training_function(config, args):\n batch_size = int(config[\"batch_size\"])\n \n # We need to initialize the trackers we use, and also store our configuration\n- if args.with_tracking:\n- if accelerator.is_main_process:\n- run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n- accelerator.init_trackers(run, config)\n+ if args.with_tracking and accelerator.is_main_process:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n datasets = load_dataset(\"glue\", \"mrpc\")\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 35afd6c5d..5f32a83d1 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -19,6 +19,7 @@\n import sys\n import warnings\n from contextlib import contextmanager\n+from functools import wraps\n from typing import List, Optional, Union\n \n import torch\n@@ -356,23 +357,59 @@ def mixed_precision(self):\n mixed_precision = self.state.mixed_precision\n return mixed_precision\n \n- @contextmanager\n- def local_main_process_first(self):\n+ def on_main_process(func):\n+ \"\"\"\n+ A decorator that will run the decorated function on the main process only.\n \"\"\"\n- Lets the local main process go inside a with block.\n \n- The other processes will enter the with block after the main process exits.\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.is_main_process or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ def on_local_main_process(func):\n+ \"\"\"\n+ A decorator that will run the decorated function on the local main process only.\n \"\"\"\n- yield from self._goes_first(self.is_local_main_process)\n \n- @contextmanager\n- def 
main_process_first(self):\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.is_local_main_process or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ def on_process(process_idx):\n+ \"\"\"\n+ A decorator that will run the decorated function on a given process index only.\n \"\"\"\n- Lets the main process go first inside a with block.\n \n- The other processes will enter the with block after the main process exits.\n+ def decorator(func):\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.process_idx == process_idx or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ return decorator\n+\n+ def on_local_process(local_process_idx):\n \"\"\"\n- yield from self._goes_first(self.is_main_process)\n+ Run func on certain local process only\n+ \"\"\"\n+\n+ def decorator(func):\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.local_process_idx == local_process_idx or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ return decorator\n \n def _goes_first(self, is_main):\n if not is_main:\n@@ -383,6 +420,24 @@ def _goes_first(self, is_main):\n if is_main:\n self.wait_for_everyone()\n \n+ @contextmanager\n+ def main_process_first(self):\n+ \"\"\"\n+ Lets the main process go first inside a with block.\n+\n+ The other processes will enter the with block after the main process exits.\n+ \"\"\"\n+ yield from self._goes_first(self.is_main_process)\n+\n+ @contextmanager\n+ def local_main_process_first(self):\n+ \"\"\"\n+ Lets the local main process go inside a with block.\n+\n+ The other processes will enter the with block after the main process exits.\n+ \"\"\"\n+ yield from self._goes_first(self.is_local_main_process)\n+\n @contextmanager\n def no_sync(self, model):\n \"\"\"\n@@ -991,6 +1046,7 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None, init_k\n for tracker in self.trackers:\n tracker.store_init_configuration(config)\n \n+ @on_main_process\n def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dict] = {}):\n \"\"\"\n Logs `values` to all stored trackers in `self.trackers`.\n@@ -1007,17 +1063,16 @@ def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dic\n {\"wandb\": {\"tags\": [\"tag_a\", \"tag_b\"]}}\n ```\n \"\"\"\n- if self.is_main_process:\n- for tracker in self.trackers:\n- tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))\n+ for tracker in self.trackers:\n+ tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))\n \n+ @on_main_process\n def end_training(self):\n \"\"\"\n Runs any special end training behaviors, such as stopping trackers\n \"\"\"\n- if self.is_main_process:\n- for tracker in self.trackers:\n- tracker.finish()\n+ for tracker in self.trackers:\n+ tracker.finish()\n \n def save(self, obj, f):\n \"\"\"\n", "code_comments": [ { "body": "```suggestion\r\n A decorator that will run the decorated function on the main process only.\r\n```", "diff_hunk": "@@ -356,23 +357,59 @@ def mixed_precision(self):\n mixed_precision = self.state.mixed_precision\n return mixed_precision\n \n- @contextmanager\n- def local_main_process_first(self):\n+ def on_main_process(func):\n+ \"\"\"\n+ Run func on main process only", "from_author": false }, { "body": "```suggestion\r\n A decorator that will run the decorated function on the local main process only.\r\n```", "diff_hunk": "@@ -356,23 +357,59 @@ def mixed_precision(self):\n 
mixed_precision = self.state.mixed_precision\n return mixed_precision\n \n- @contextmanager\n- def local_main_process_first(self):\n+ def on_main_process(func):\n+ \"\"\"\n+ Run func on main process only\n \"\"\"\n- Lets the local main process go inside a with block.\n \n- The other processes will enter the with block after the main process exits.\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.is_main_process or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ def on_local_main_process(func):\n+ \"\"\"\n+ Run func on local main process only", "from_author": false }, { "body": "```suggestion\r\n A decorator that will run the decorated function on a given process index only.\r\n```", "diff_hunk": "@@ -356,23 +357,59 @@ def mixed_precision(self):\n mixed_precision = self.state.mixed_precision\n return mixed_precision\n \n- @contextmanager\n- def local_main_process_first(self):\n+ def on_main_process(func):\n+ \"\"\"\n+ Run func on main process only\n \"\"\"\n- Lets the local main process go inside a with block.\n \n- The other processes will enter the with block after the main process exits.\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.is_main_process or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ def on_local_main_process(func):\n+ \"\"\"\n+ Run func on local main process only\n \"\"\"\n- yield from self._goes_first(self.is_local_main_process)\n \n- @contextmanager\n- def main_process_first(self):\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.is_local_main_process or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ def on_process(process_idx):\n+ \"\"\"\n+ Run func on certain process only", "from_author": false }, { "body": "```suggestion\r\n def on_process(process_idx, local=False):\r\n```\r\nMaybe we could group this one and the text in one decorator since it's one that takes arguments? ", "diff_hunk": "@@ -356,23 +357,59 @@ def mixed_precision(self):\n mixed_precision = self.state.mixed_precision\n return mixed_precision\n \n- @contextmanager\n- def local_main_process_first(self):\n+ def on_main_process(func):\n+ \"\"\"\n+ Run func on main process only\n \"\"\"\n- Lets the local main process go inside a with block.\n \n- The other processes will enter the with block after the main process exits.\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.is_main_process or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ def on_local_main_process(func):\n+ \"\"\"\n+ Run func on local main process only\n \"\"\"\n- yield from self._goes_first(self.is_local_main_process)\n \n- @contextmanager\n- def main_process_first(self):\n+ @wraps(func)\n+ def wrapper(self, *args, **kwargs):\n+ if self.is_local_main_process or not self.use_distributed:\n+ return func(self, *args, **kwargs)\n+\n+ return wrapper\n+\n+ def on_process(process_idx):", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Hi @sgugger , sorry for the delay, hope this is better now", "from_author": true }, { "body": "@ZhiyuanChen could you run \"make style; make quality\" to solve the Quality Check issue? πŸ˜ƒ \r\n\r\nThanks!", "from_author": false }, { "body": "> @ZhiyuanChen could you run \"make style; make quality\" to solve the Quality Check issue? 
πŸ˜ƒ\r\n> \r\n> Thanks!\r\n\r\nSorry, I thought I have fixed it but somehow didn't pushed... ", "from_author": true }, { "body": "> Thanks a lot for iterating! Almost ready to merge, I just left some comments on the docstrings and a suggestion to group the decorators `on_process` and `on_local_process` together. Let me know your thoughts!\n\nThank you very much tor your comments and suggestion. \n\nFor the docstrings, i have accepted rll suggestions. \n\nFor the group... I think its rather important to ensure api in a similar organisation, and grouping in this way break the consistency with `is_local_main_process` and `on_local_main_process`. \nThough we could use alias/partial/impl to make the implementation more compact", "from_author": true }, { "body": "Let's roll with your choice and we'll see what users think then. We can always add aliases in the future :-)\r\n\r\nThanks again for your contribution!", "from_author": false }, { "body": "> Let's roll with your choice and we'll see what users think then. We can always add aliases in the future :-)\n\nHaha, sure thing~\n\n> Thanks again for your contribution!\n\nNo worries~\n", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/488", "pr_id": 987251673 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 196aa0b06..eedbf3392 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -280,7 +280,7 @@ def collate_fn(examples):\n predictions, references = accelerator.gather(\n (predictions, batch[\"labels\"])\n ) # If we are in a multiprocess environment, the last batch has duplicates\n- if accelerator.num_processes > 1:\n+ if accelerator.use_distributed:\n if step == len(eval_dataloader) - 1:\n predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n references = references[: len(eval_dataloader.dataset) - samples_seen]\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex c9445ee81..741e62b8c 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -178,7 +178,7 @@ def training_function(config, args):\n predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n # New Code #\n # First we check if it's a distributed system\n- if accelerator.num_processes > 1:\n+ if accelerator.use_distributed:\n # Then see if we're on the last batch of our eval dataloader\n if step == len(eval_dataloader) - 1:\n # Last batch needs to be truncated on distributed systems as it contains additional samples\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex bd2ed3b29..6e414bc9c 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -242,7 +242,7 @@ def training_function(config, args):\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n predictions, references = accelerator.gather((predictions, batch[\"label\"]))\n- if accelerator.num_processes > 1:\n+ if accelerator.use_distributed:\n if step == len(eval_dataloader) - 1:\n predictions = predictions[: len(eval_dataloader) - samples_seen]\n references = references[: len(eval_dataloader) - samples_seen]\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 0433ed9a1..6e6aec0ff 100644\n--- a/examples/complete_nlp_example.py\n+++ 
b/examples/complete_nlp_example.py\n@@ -222,7 +222,7 @@ def collate_fn(examples):\n predictions, references = accelerator.gather(\n (predictions, batch[\"labels\"])\n ) # If we are in a multiprocess environment, the last batch has duplicates\n- if accelerator.num_processes > 1:\n+ if accelerator.use_distributed:\n if step == len(eval_dataloader) - 1:\n predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n references = references[: len(eval_dataloader.dataset) - samples_seen]\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 99afddeb8..0a2e5bb35 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -273,6 +273,10 @@ def __init__(\n if self.rng_types is None:\n self.rng_types = [\"torch\"] if is_torch_version(\"<=\", \"1.5.1\") else [\"generator\"]\n \n+ @property\n+ def use_distributed(self):\n+ return self.distributed_type != DistributedType.NO and self.num_processes > 1\n+\n @property\n def distributed_type(self):\n return self.state.distributed_type\n@@ -361,7 +365,7 @@ def no_sync(self, model):\n PyTorch Module that was prepared with `Accelerator.prepare`\n \"\"\"\n context = contextlib.nullcontext\n- if self.num_processes > 1:\n+ if self.use_distributed:\n context = getattr(model, \"no_sync\", context)\n \n with context():\n", "code_comments": [ { "body": "Maybe also check for the number of processes to be `> 1` here, since you are replacing all those tests?", "diff_hunk": "@@ -273,6 +273,10 @@ def __init__(\n if self.rng_types is None:\n self.rng_types = [\"torch\"] if is_torch_version(\"<=\", \"1.5.1\") else [\"generator\"]\n \n+ @property\n+ def use_distributed(self):\n+ return self.distributed_type != DistributedType.NO", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "I noticed the rest modification proposed in the original issues have been resolved, so just add this last property\r\n", "from_author": true }, { "body": "> Thanks for your PR! I left a small comment.\n\nThank you for the comments, I have updated the pr", "from_author": true }, { "body": "Thanks!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/487", "pr_id": 987242084 }, { "diff": "diff --git a/docs/source/accelerator.mdx b/docs/source/accelerator.mdx\nindex 6e7a93a3d..5202aeb8c 100644\n--- a/docs/source/accelerator.mdx\n+++ b/docs/source/accelerator.mdx\n@@ -38,4 +38,27 @@ should search for and replace by the corresponding methods of your `accelerator`\n - Use [`~Accelerator.clip_grad_norm_`] instead of `torch.nn.utils.clip_grad_norm_` and\n [`~Accelerator.clip_grad_value_`] instead of `torch.nn.utils.clip_grad_value_`.\n \n+To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`. 
\n+This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should\n+actually be performed, and auto-scale the loss:\n+\n+```python\n+accelerator = Accelerator(gradient_accumulation_steps=2)\n+\n+for (input, label) in enumerate(training_dataloader):\n+ with accelerator.accumulate(model):\n+ predictions = model(input)\n+ loss = loss_function(predictions, labels)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+ optimizer.zero_grad()\n+```\n+\n+<Tip warning={true}>\n+\n+Using this with `dispatch_batches=True` (which is the default for iterable datasets) is currently not supported.\n+\n+</Tip>\n+\n [[autodoc]] Accelerator\ndiff --git a/docs/source/internal.mdx b/docs/source/internal.mdx\nindex e1eec36be..8da23bee8 100644\n--- a/docs/source/internal.mdx\n+++ b/docs/source/internal.mdx\n@@ -12,6 +12,10 @@ specific language governing permissions and limitations under the License.\n \n # Internals\n \n+## Gradient Accumulation states\n+\n+[[autodoc]] state.GradientState\n+\n ## Optimizer\n \n [[autodoc]] optimizer.AcceleratedOptimizer\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex b69978023..d3b253bce 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -102,19 +102,21 @@ def collate_fn(examples):\n \n \n def training_function(config, args):\n- # Initialize accelerator\n- accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n- # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n- lr = config[\"lr\"]\n- num_epochs = int(config[\"num_epochs\"])\n- seed = int(config[\"seed\"])\n- batch_size = int(config[\"batch_size\"])\n # New Code #\n gradient_accumulation_steps = int(args.gradient_accumulation_steps)\n+ # Initialize accelerator\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps\n+ )\n if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:\n raise NotImplementedError(\n \"Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`\"\n )\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n \n metric = evaluate.load(\"glue\", \"mrpc\")\n \n@@ -152,20 +154,11 @@ def training_function(config, args):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n # New code #\n- # We use the new `no_sync` context manager to prevent gradient averaging\n- # until we want to at the proper step if we happen to be in a distributed setup\n- # otherwise it does nothing\n+ # We use the new `accumulate` context manager to perform gradient accumulation\n # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.\n- if step % gradient_accumulation_steps != 0:\n- # Accumulate gradients locally\n- with accelerator.no_sync(model):\n- output = model(**batch)\n- loss = output.loss / gradient_accumulation_steps\n- accelerator.backward(loss)\n- else:\n- # Sync gradients and step\n+ with accelerator.accumulate(model):\n output = model(**batch)\n- loss = output.loss / gradient_accumulation_steps\n+ loss = output.loss\n accelerator.backward(loss)\n optimizer.step()\n lr_scheduler.step()\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 1b3fa27e9..1148262af 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -28,7 +28,7 @@\n from .logging import get_logger\n from .optimizer import AcceleratedOptimizer\n from .scheduler import AcceleratedScheduler\n-from .state import AcceleratorState\n+from .state import AcceleratorState, GradientState\n from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers\n from .utils import (\n DeepSpeedPlugin,\n@@ -92,6 +92,9 @@ class Accelerator:\n default to the value in the environment variable `MIXED_PRECISION`, which will use the default value in the\n accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp16'\n requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n+ gradient_accumulation_steps (`int`, *optional*, default to 1):\n+ The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with\n+ `Accelerator.accumulate`.\n cpu (`bool`, *optional*):\n Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force\n the execution on one process only.\n@@ -146,6 +149,7 @@ def __init__(\n split_batches: bool = False,\n fp16: bool = None,\n mixed_precision: Union[PrecisionType, str] = None,\n+ gradient_accumulation_steps: int = 1,\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n fsdp_plugin: FullyShardedDataParallelPlugin = None,\n@@ -231,6 +235,17 @@ def __init__(\n **kwargs,\n )\n \n+ if gradient_accumulation_steps > 1:\n+ if self.state.distributed_type == DistributedType.TPU:\n+ raise NotImplementedError(\n+ \"Gradient accumulation on TPU is not supported. Pass in `gradient_accumulation_steps=1`\"\n+ )\n+ if dispatch_batches:\n+ raise NotImplementedError(\n+ \"Gradient accumulation with dispatched dataloaders is not supported. 
Pass in `gradient_accumulation_steps=1` or `dispatch_batches=False`\"\n+ )\n+\n+ self.gradient_accumulation_steps = gradient_accumulation_steps\n self.device_placement = device_placement\n self.split_batches = split_batches\n self.dispatch_batches = dispatch_batches\n@@ -262,6 +277,10 @@ def __init__(\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n \n+ # Start of internal step tracking\n+ self.step = 0\n+ self.gradient_state = GradientState()\n+\n # Internal references to the training objects\n self._optimizers = []\n self._models = []\n@@ -367,6 +386,37 @@ def no_sync(self, model):\n with context():\n yield\n \n+ def _do_sync(self):\n+ \"Sets the right `sync_gradients` context and either resets or increases `self.step`\"\n+ if self.gradient_state.end_of_dataloader:\n+ self.step = 0\n+ self.gradient_state._set_sync_gradients(True)\n+ else:\n+ self.step += 1\n+ self.gradient_state._set_sync_gradients((self.step % self.gradient_accumulation_steps) == 0)\n+\n+ @property\n+ def sync_gradients(self):\n+ return self.gradient_state.sync_gradients\n+\n+ @contextmanager\n+ def accumulate(self, model):\n+ \"\"\"\n+ A context manager that will lightly wrap around and perform gradient accumulation automatically\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ PyTorch Module that was prepared with `Accelerator.prepare`\n+ \"\"\"\n+ self._do_sync()\n+ if self.sync_gradients:\n+ context = contextlib.nullcontext\n+ else:\n+ context = self.no_sync\n+\n+ with context(model):\n+ yield\n+\n def print(self, *args, **kwargs):\n \"\"\"\n Use in replacement of `print()` to only print once per server.\n@@ -734,6 +784,7 @@ def backward(self, loss, **kwargs):\n \"\"\"\n Use `accelerator.backward(loss)` in lieu of `loss.backward()`.\n \"\"\"\n+ loss /= self.gradient_accumulation_steps\n if self.distributed_type == DistributedType.DEEPSPEED:\n self.deepspeed_engine_wrapped.backward(loss, **kwargs)\n elif self.scaler is not None:\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 806fb7ddc..524034eb1 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -18,7 +18,8 @@\n import torch\n from torch.utils.data import BatchSampler, DataLoader, IterableDataset\n \n-from .state import AcceleratorState, DistributedType, is_tpu_available\n+from .logging import get_logger\n+from .state import AcceleratorState, DistributedType, GradientState, is_tpu_available\n from .utils import (\n RNGType,\n broadcast,\n@@ -37,6 +38,7 @@\n if is_tpu_available(check_device=False):\n import torch_xla.distributed.parallel_loader as xpl\n \n+logger = get_logger(__name__)\n \n # kwargs of the DataLoader in min version 1.4.0.\n _PYTORCH_DATALOADER_KWARGS = {\n@@ -294,31 +296,44 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n self.device = device\n self.rng_types = rng_types\n self.generator = generator\n+ self.gradient_state = GradientState()\n \n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n- for batch in super().__iter__():\n- yield batch if self.device is None else send_to_device(batch, self.device)\n+ self.gradient_state._set_end_of_dataloader(False)\n+ dataloader_iter = super().__iter__()\n+ # We iterate one batch ahead to check when we are at the end\n+ try:\n+ current_batch = next(dataloader_iter)\n+ except StopIteration:\n+ yield\n+ while True:\n+ try:\n+ # But we still move it to the device 
so it is done before `StopIteration` is reached\n+ if self.device is not None:\n+ current_batch = send_to_device(current_batch, self.device)\n+ next_batch = next(dataloader_iter)\n+ yield current_batch\n+ current_batch = next_batch\n+ except StopIteration:\n+ self.gradient_state._set_end_of_dataloader(True)\n+ yield current_batch\n+ break\n \n \n class DataLoaderDispatcher(DataLoader):\n \"\"\"\n+ Args:\n Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each\n process their part of the batch.\n-\n- Args:\n split_batches (`bool`, *optional*, defaults to `False`):\n Whether the resulting `DataLoader` should split the batches of the original data loader across devices or\n yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of\n- `num_processes` batches at each iteration).\n-\n- Another way to see this is that the observed batch size will be the same as the initial `dataloader` if\n- this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`\n- otherwise.\n-\n- Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of\n- `batch_size`.\n+ `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be\n+ the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial\n+ `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch\n+ size of the `dataloader` is a round multiple of `batch_size`.\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n@@ -338,6 +353,9 @@ def __init__(self, dataset, split_batches: bool = False, **kwargs):\n if shuffle:\n torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)\n \n+ self.gradient_state = GradientState()\n+ self.state = AcceleratorState()\n+\n def __iter__(self):\n state = AcceleratorState()\n if state.process_index == 0:\n@@ -408,12 +426,11 @@ def __iter__(self):\n yield slice_tensors(batch, data_slice)\n \n def __len__(self):\n- state = AcceleratorState()\n whole_length = super().__len__()\n if self.drop_last:\n- return whole_length // state.num_processes\n+ return whole_length // self.state.num_processes\n else:\n- return math.ceil(whole_length / state.num_processes)\n+ return math.ceil(whole_length / self.state.num_processes)\n \n \n def prepare_data_loader(\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 046d5ce5b..c5ed66cd4 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -17,7 +17,7 @@\n \n import torch\n \n-from .state import AcceleratorState\n+from .state import AcceleratorState, GradientState\n from .utils import DistributedType, honor_type, is_torch_version, is_tpu_available\n \n \n@@ -53,6 +53,7 @@ def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.accelerator_state = AcceleratorState()\n+ self.gradient_state = GradientState()\n self.device_placement = device_placement\n self._is_overflow = False\n \n@@ -101,37 +102,39 @@ def state_dict(self):\n return self.optimizer.state_dict()\n \n def zero_grad(self, set_to_none=None):\n- if is_torch_version(\"<\", \"1.7.0\"):\n- if set_to_none is not None:\n- raise ValueError(\n- \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n- f\"earlier 
versions (found version {torch.__version__}).\"\n- )\n- self.optimizer.zero_grad()\n- else:\n- accept_arg = \"set_to_none\" in inspect.signature(self.optimizer.zero_grad).parameters\n- if accept_arg:\n- if set_to_none is None:\n- set_to_none = False\n- self.optimizer.zero_grad(set_to_none=set_to_none)\n- else:\n+ if self.gradient_state.sync_gradients:\n+ if is_torch_version(\"<\", \"1.7.0\"):\n if set_to_none is not None:\n- raise ValueError(\"`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.\")\n+ raise ValueError(\n+ \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n+ f\"earlier versions (found version {torch.__version__}).\"\n+ )\n self.optimizer.zero_grad()\n+ else:\n+ accept_arg = \"set_to_none\" in inspect.signature(self.optimizer.zero_grad).parameters\n+ if accept_arg:\n+ if set_to_none is None:\n+ set_to_none = False\n+ self.optimizer.zero_grad(set_to_none=set_to_none)\n+ else:\n+ if set_to_none is not None:\n+ raise ValueError(\"`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.\")\n+ self.optimizer.zero_grad()\n \n def step(self, closure=None):\n- if self.accelerator_state.distributed_type == DistributedType.TPU:\n- optimizer_args = {\"closure\": closure} if closure is not None else {}\n- xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n- elif self.scaler is not None:\n- scale_before = self.scaler.get_scale()\n- self.scaler.step(self.optimizer, closure)\n- self.scaler.update()\n- scale_after = self.scaler.get_scale()\n- # If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow.\n- self._is_overflow = scale_after < scale_before\n- else:\n- self.optimizer.step(closure)\n+ if self.gradient_state.sync_gradients:\n+ if self.accelerator_state.distributed_type == DistributedType.TPU:\n+ optimizer_args = {\"closure\": closure} if closure is not None else {}\n+ xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n+ elif self.scaler is not None:\n+ scale_before = self.scaler.get_scale()\n+ self.scaler.step(self.optimizer, closure)\n+ self.scaler.update()\n+ scale_after = self.scaler.get_scale()\n+ # If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow.\n+ self._is_overflow = scale_after < scale_before\n+ else:\n+ self.optimizer.step(closure)\n \n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\ndiff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\nindex 1fac5f012..cfd6d719f 100644\n--- a/src/accelerate/scheduler.py\n+++ b/src/accelerate/scheduler.py\n@@ -12,7 +12,14 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from .state import AcceleratorState\n+# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation\n+\n+import warnings\n+\n+from .state import AcceleratorState, GradientState\n+\n+\n+warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"torch.optim.lr_scheduler\")\n \n \n class AcceleratedScheduler:\n@@ -40,6 +47,7 @@ def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, spli\n self.scheduler = scheduler\n self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]\n self.split_batches = split_batches\n+ self.gradient_state = GradientState()\n self.step_with_optimizer = step_with_optimizer\n \n def step(self, *args, **kwargs):\n@@ 
-52,7 +60,6 @@ def step(self, *args, **kwargs):\n for opt in self.optimizers:\n if opt.step_was_skipped:\n return\n-\n if self.split_batches:\n # Split batches -> the training dataloader batch size is not changed so one step per training step\n self.scheduler.step(*args, **kwargs)\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 63527124c..330830eb5 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -52,6 +52,7 @@ class AcceleratorState:\n Attributes:\n \n - **device** (`torch.device`) -- The device to use.\n+ - **sync_gradients** (`bool`) -- Whether to sync the gradients or not\n - **distributed_type** (`~accelerate.state.DistributedType`) -- The type of distributed environment currently\n in use.\n - **num_processes** (`int`) -- The number of processes currently launched in parallel.\n@@ -209,3 +210,36 @@ def _check_initialized(self, mixed_precision=None, cpu=None):\n raise ValueError(err.format(flag=\"cpu=True\"))\n if mixed_precision is not None and mixed_precision != self.mixed_precision:\n raise ValueError(err.format(flag=f\"mixed_precision='{mixed_precision}'\"))\n+\n+\n+class GradientState:\n+ \"\"\"\n+ This is a variation of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all\n+ instance of `GradientState` share the same state, which is initialized on the first instantiation.\n+\n+ This specific state revolves around whether gradients should be synced and if we have reached the end of a prepared\n+ dataloader Attributes:\n+\n+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced\n+ - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader\n+ \"\"\"\n+\n+ _shared_state = {}\n+\n+ def __init__(self):\n+ self.__dict__ = self._shared_state\n+ if not getattr(self, \"initialized\", False):\n+ self.sync_gradients = True\n+ self.end_of_dataloader = False\n+ self.initialized = True\n+\n+ def __repr__(self):\n+ return f\"Sync Gradients: {self.sync_gradients}\\n\" f\"At end of current dataloader: {self.end_of_dataloader}\\n\"\n+\n+ def _set_sync_gradients(self, sync_gradients):\n+ \"Private function that sets whether gradients should be synchronized. Users should not have to call this.\"\n+ self.sync_gradients = sync_gradients\n+\n+ def _set_end_of_dataloader(self, end_of_dataloader):\n+ \"Private function that sets whether the end of the current dataloader has been reached. 
Users should not have to call this.\"\n+ self.end_of_dataloader = end_of_dataloader\ndiff --git a/src/accelerate/test_utils/scripts/test_sync.py b/src/accelerate/test_utils/scripts/test_sync.py\nindex b58a8c092..0b0dc7abd 100644\n--- a/src/accelerate/test_utils/scripts/test_sync.py\n+++ b/src/accelerate/test_utils/scripts/test_sync.py\n@@ -16,6 +16,8 @@\n \n import torch\n import torch.nn.functional as F\n+from torch.optim import AdamW\n+from torch.optim.lr_scheduler import LambdaLR\n from torch.utils.data import DataLoader\n \n from accelerate import Accelerator\n@@ -23,30 +25,61 @@\n from accelerate.utils import DistributedType, set_seed\n \n \n-def step_model(model, input, target, accelerator):\n+def check_model_parameters(model_a, model_b, did_step):\n+ for param, grad_param in zip(model_a.parameters(), model_b.parameters()):\n+ if not param.requires_grad:\n+ continue\n+ if not did_step:\n+ # Grads should not be in sync\n+ assert (\n+ torch.allclose(param.grad, grad_param.grad) is False\n+ ), f\"Gradients in sync when they should not be:\\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})\"\n+ else:\n+ # Grads should be in sync\n+ assert (\n+ torch.allclose(param.grad, grad_param.grad) is True\n+ ), f\"Gradients not in sync when they should be:\\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})\"\n+\n+\n+def step_model(model, input, target, accelerator, do_backward=True):\n model.train()\n output = model(input)\n loss = F.mse_loss(output, target.to(output.device))\n- accelerator.backward(loss)\n+ if not do_backward:\n+ loss /= accelerator.gradient_accumulation_steps\n+ loss.backward()\n+ else:\n+ accelerator.backward(loss)\n \n \n-def get_training_setup(accelerator):\n+def get_training_setup(accelerator, sched=False):\n \"Returns everything needed to perform basic training\"\n set_seed(42)\n model = RegressionModel()\n- model.to(accelerator.device)\n- dset = RegressionDataset()\n+ ddp_model = deepcopy(model)\n+ dset = RegressionDataset(length=80)\n dataloader = DataLoader(dset, batch_size=16)\n+ model.to(accelerator.device)\n+ if sched:\n+ opt = AdamW(params=model.parameters(), lr=1e-3)\n+ ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)\n+ sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)\n+ ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)\n # Make a copy of `model`\n- ddp_model, dataloader = accelerator.prepare(deepcopy(model), dataloader)\n- # Use a single batch for all of the tests\n- ddp_input, ddp_target = next(iter(dataloader)).values()\n- return model, ddp_model, ddp_input, ddp_target\n+ if sched:\n+ ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)\n+ else:\n+ ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)\n+ if sched:\n+ return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)\n+ return model, ddp_model, dataloader\n \n \n def test_noop_sync(accelerator):\n # Test when on a single CPU or GPU that the context manager does nothing\n- model, ddp_model, ddp_input, ddp_target = get_training_setup(accelerator)\n+ model, ddp_model, dataloader = get_training_setup(accelerator)\n+ # Use a single batch\n+ ddp_input, ddp_target = next(iter(dataloader)).values()\n for iteration in range(3):\n # Gather the distributed inputs and targs for the base model\n input, target = accelerator.gather((ddp_input, ddp_target))\n@@ -63,6 +96,7 @@ def test_noop_sync(accelerator):\n step_model(ddp_model, ddp_input, ddp_target, accelerator)\n \n # Since 
`no_sync` is a noop, `ddp_model` and `model` grads should always be in sync\n+ check_model_parameters(model, ddp_model, True)\n for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):\n if not param.requires_grad:\n continue\n@@ -77,7 +111,9 @@ def test_noop_sync(accelerator):\n \n def test_distributed_sync(accelerator):\n # Test on distributed setup that context manager behaves properly\n- model, ddp_model, ddp_input, ddp_target = get_training_setup(accelerator)\n+ model, ddp_model, dataloader = get_training_setup(accelerator)\n+ # Use a single batch\n+ ddp_input, ddp_target = next(iter(dataloader)).values()\n for iteration in range(3):\n # Gather the distributed inputs and targs for the base model\n input, target = accelerator.gather((ddp_input, ddp_target))\n@@ -113,17 +149,92 @@ def test_distributed_sync(accelerator):\n ddp_input = ddp_input[torch.randperm(16)]\n \n \n+def test_gradient_accumulation():\n+ accelerator = Accelerator(gradient_accumulation_steps=2)\n+ # Test that context manager behaves properly\n+ model, ddp_model, dataloader = get_training_setup(accelerator)\n+ for iteration, batch in enumerate(dataloader):\n+ ddp_input, ddp_target = batch.values()\n+ # Gather the distributed inputs and targs for the base model\n+ input, target = accelerator.gather((ddp_input, ddp_target))\n+ input, target = input.to(accelerator.device), target.to(accelerator.device)\n+ # Perform our initial ground truth step in non \"DDP\"\n+ step_model(model, input, target, accelerator, False)\n+ # Do \"gradient accumulation\" (noop)\n+ with accelerator.accumulate(ddp_model):\n+ step_model(ddp_model, ddp_input, ddp_target, accelerator)\n+\n+ # DDP model and model should only be in sync when not (iteration % 2 == 0)\n+ for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):\n+ if not param.requires_grad:\n+ continue\n+ if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):\n+ # Grads should be in sync\n+ assert (\n+ torch.allclose(param.grad, ddp_param.grad) is True\n+ ), f\"Gradients not in sync when they should be at iteration {iteration}:\\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})\"\n+ else:\n+ # Grads should not be in sync\n+ assert (\n+ torch.allclose(param.grad, ddp_param.grad) is False\n+ ), f\"Gradients in sync when they should not be at iteration {iteration}:\\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})\"\n+\n+ # Shuffle ddp_input on each iteration\n+ torch.manual_seed(1337 + iteration)\n+ ddp_input = ddp_input[torch.randperm(16)]\n+\n+\n+def test_gradient_accumulation_with_opt_and_scheduler():\n+ accelerator = Accelerator(gradient_accumulation_steps=2)\n+ # Test that context manager behaves properly\n+ model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)\n+ for iteration, batch in enumerate(dataloader):\n+ ddp_input, ddp_target = batch.values()\n+ # Gather the distributed inputs and targs for the base model\n+ input, target = accelerator.gather((ddp_input, ddp_target))\n+ input, target = input.to(accelerator.device), target.to(accelerator.device)\n+ # Perform our initial ground truth step in non \"DDP\"\n+ model.train()\n+ ddp_model.train()\n+ step_model(model, input, target, accelerator, False)\n+ opt.step()\n+ for _ in range(accelerator.num_processes):\n+ sched.step()\n+ opt.zero_grad()\n+ # Perform gradient accumulation under wrapper\n+ with accelerator.accumulate(ddp_model):\n+ step_model(ddp_model, ddp_input, ddp_target, accelerator)\n+ ddp_opt.step()\n+ 
ddp_sched.step()\n+ ddp_opt.zero_grad()\n+\n+ # Learning rates should be the same\n+ assert opt.param_groups[0][\"lr\"] == ddp_opt.param_groups[0][\"lr\"]\n+ did_step = (((iteration + 1) % 2) == 0) or (iteration == (len(dataloader) - 1))\n+ if accelerator.num_processes > 1:\n+ check_model_parameters(model, ddp_model, did_step)\n+ # Shuffle ddp_input on each iteration\n+ torch.manual_seed(1337 + iteration)\n+\n+\n def main():\n accelerator = Accelerator()\n state = accelerator.state\n if state.distributed_type == DistributedType.NO:\n if state.local_process_index == 0:\n- print(\"**NOOP `no_sync` gradient accumulation**\")\n+ print(\"**Test NOOP `no_sync` context manager**\")\n test_noop_sync(accelerator)\n if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):\n if state.local_process_index == 0:\n- print(\"**Distributed `no_sync` gradient accumulation**\")\n+ print(\"**Test Distributed `no_sync` context manager**\")\n test_distributed_sync(accelerator)\n+ if state.distributed_type == DistributedType.MULTI_GPU:\n+ if state.local_process_index == 0:\n+ print(\"**Test `accumulate` gradient accumulation**\")\n+ test_gradient_accumulation()\n+ if state.local_process_index == 0:\n+ print(\"**Test `accumulate` gradient accumulation with optimizer and scheduler**\")\n+ test_gradient_accumulation_with_opt_and_scheduler()\n \n \n def _mp_fn(index):\ndiff --git a/tests/test_grad_sync.py b/tests/test_grad_sync.py\nindex c066bd41a..182d3ef95 100644\n--- a/tests/test_grad_sync.py\n+++ b/tests/test_grad_sync.py\n@@ -36,20 +36,19 @@ def setUp(self):\n self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_sync.py\"])\n \n @require_cpu\n- def test_gradient_sync_single_cpu_noop(self):\n- debug_launcher(test_sync.main)\n+ def test_gradient_sync_cpu_noop(self):\n debug_launcher(test_sync.main, num_processes=1)\n \n @require_cpu\n- def test_gradient_sync_multi_cpu(self):\n+ def test_gradient_sync_cpu_multi(self):\n debug_launcher(test_sync.main)\n \n @require_single_gpu\n- def test_gradient_sync_single_gpu(self):\n- debug_launcher(test_sync.main, num_processes=1)\n+ def test_gradient_sync_gpu(self):\n+ test_sync.main()\n \n @require_multi_gpu\n- def test_gradient_sync_multi_gpu(self):\n+ def test_gradient_sync_gpu_multi(self):\n print(f\"Found {torch.cuda.device_count()} devices.\")\n cmd = get_launch_prefix() + [f\"--nproc_per_node={torch.cuda.device_count()}\", self.test_file_path]\n with patch_environment(omp_num_threads=1):\ndiff --git a/tests/test_kwargs_handlers.py b/tests/test_kwargs_handlers.py\nindex 8b438d1c6..d8c893234 100644\n--- a/tests/test_kwargs_handlers.py\n+++ b/tests/test_kwargs_handlers.py\n@@ -21,6 +21,7 @@\n import torch\n \n from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs\n+from accelerate.state import AcceleratorState\n from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu\n from accelerate.utils import KwargsHandler\n \n@@ -44,7 +45,8 @@ def test_kwargs_handler(self):\n def test_grad_scaler_kwargs(self):\n # If no defaults are changed, `to_kwargs` returns an empty dict.\n scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)\n- accelerator = Accelerator(fp16=True, kwargs_handlers=[scaler_handler])\n+ AcceleratorState._reset_state()\n+ accelerator = Accelerator(mixed_precision=\"fp16\", kwargs_handlers=[scaler_handler])\n print(accelerator.use_fp16)\n scaler = accelerator.scaler\n \n", "code_comments": [ { "body": "Note that the 
first part of the test is implied by the second ;-)", "diff_hunk": "@@ -367,6 +380,39 @@ def no_sync(self, model):\n with context():\n yield\n \n+ @property\n+ def _do_sync(self) -> bool:\n+ \"Checks if gradients should be synchronized and the optimizers + schedulers should be stepped\"\n+ if self.state.end_of_dataloader:\n+ self.step = 0\n+ return True\n+ self.step += 1\n+ if (self.gradient_accumulation_steps == 1) or ((self.step % self.gradient_accumulation_steps) == 0):", "from_author": false }, { "body": "This shouldn't be in the state, the state only should contain the environment info, not some things that are subject to change. So store it directly in self.", "diff_hunk": "@@ -367,6 +380,39 @@ def no_sync(self, model):\n with context():\n yield\n \n+ @property\n+ def _do_sync(self) -> bool:\n+ \"Checks if gradients should be synchronized and the optimizers + schedulers should be stepped\"\n+ if self.state.end_of_dataloader:", "from_author": false }, { "body": "Same here, this should be stored somewhere else.", "diff_hunk": "@@ -367,6 +380,39 @@ def no_sync(self, model):\n with context():\n yield\n \n+ @property\n+ def _do_sync(self) -> bool:\n+ \"Checks if gradients should be synchronized and the optimizers + schedulers should be stepped\"\n+ if self.state.end_of_dataloader:\n+ self.step = 0\n+ return True\n+ self.step += 1\n+ if (self.gradient_accumulation_steps == 1) or ((self.step % self.gradient_accumulation_steps) == 0):\n+ return True\n+ return False\n+\n+ @contextmanager\n+ def accumulate(self, model):\n+ \"\"\"\n+ A context manager that will lightly wrap around and perform gradient accumulation automatically\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ PyTorch Module that was prepared with `Accelerator.prepare`\n+ dataloader (`torch.utils.DataLoader`)\n+ PyTorch DataLoader that was prepared with `Accelerator.prepare`\n+ \"\"\"\n+\n+ if self._do_sync:\n+ context = contextlib.nullcontext\n+ AcceleratorState._set_state(\"sync_gradients\", True)\n+ else:\n+ context = self.no_sync\n+ AcceleratorState._set_state(\"sync_gradients\", False)", "from_author": false }, { "body": "```suggestion\r\n gradient_accumulation_steps (`int`, *optional*, defaults to 1):\r\n```", "diff_hunk": "@@ -92,6 +92,9 @@ class Accelerator:\n default to the value in the environment variable `MIXED_PRECISION`, which will use the default value in the\n accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp16'\n requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n+ gradient_accumulation_steps (`int`, *optional*):", "from_author": false }, { "body": "```suggestion\r\n [`Accelerator.accumulate`].\r\n```\r\nLink to the doc.", "diff_hunk": "@@ -92,6 +92,9 @@ class Accelerator:\n default to the value in the environment variable `MIXED_PRECISION`, which will use the default value in the\n accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp16'\n requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n+ gradient_accumulation_steps (`int`, *optional*):\n+ The number of steps that should pass before gradients are accumulated. 
Should be combined with\n+ `Accelerator.accumulate`", "from_author": false }, { "body": "I think the class should have a static method to set this.", "diff_hunk": "@@ -367,6 +380,37 @@ def no_sync(self, model):\n with context():\n yield\n \n+ def _do_sync(self) -> bool:\n+ \"Checks if gradients should be synchronized and the optimizers + schedulers should be stepped\"\n+ if GradientState().end_of_dataloader:\n+ self.step = 0\n+ return True\n+ else:\n+ self.step += 1\n+ return (self.step % self.gradient_accumulation_steps) == 0\n+\n+ @contextmanager\n+ def accumulate(self, model):\n+ \"\"\"\n+ A context manager that will lightly wrap around and perform gradient accumulation automatically\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ PyTorch Module that was prepared with `Accelerator.prepare`\n+ dataloader (`torch.utils.DataLoader`)\n+ PyTorch DataLoader that was prepared with `Accelerator.prepare`\n+ \"\"\"\n+\n+ if self._do_sync():\n+ context = contextlib.nullcontext\n+ GradientState._set_state(\"sync_gradients\", True)", "from_author": false }, { "body": "Same here", "diff_hunk": "@@ -367,6 +380,37 @@ def no_sync(self, model):\n with context():\n yield\n \n+ def _do_sync(self) -> bool:\n+ \"Checks if gradients should be synchronized and the optimizers + schedulers should be stepped\"\n+ if GradientState().end_of_dataloader:\n+ self.step = 0\n+ return True\n+ else:\n+ self.step += 1\n+ return (self.step % self.gradient_accumulation_steps) == 0\n+\n+ @contextmanager\n+ def accumulate(self, model):\n+ \"\"\"\n+ A context manager that will lightly wrap around and perform gradient accumulation automatically\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ PyTorch Module that was prepared with `Accelerator.prepare`\n+ dataloader (`torch.utils.DataLoader`)\n+ PyTorch DataLoader that was prepared with `Accelerator.prepare`\n+ \"\"\"\n+\n+ if self._do_sync():\n+ context = contextlib.nullcontext\n+ GradientState._set_state(\"sync_gradients\", True)\n+ else:\n+ context = self.no_sync\n+ GradientState._set_state(\"sync_gradients\", False)", "from_author": false }, { "body": "The dataloader may still have no length, so set to `False at the start of `__iter__` and to `True` at the end.", "diff_hunk": "@@ -298,7 +298,8 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n- for batch in super().__iter__():\n+ for i, batch in enumerate(super().__iter__()):\n+ GradientState._set_state(\"end_of_dataloader\", i == (len(self) - 1))", "from_author": false }, { "body": "```suggestion\r\n GradientState._set_state(\"end_of_dataloader\", True)\r\n```\r\nLike this.\r\n", "diff_hunk": "@@ -406,6 +408,8 @@ def __iter__(self):\n \n data_slice = slice(state.process_index * batch_size, (state.process_index + 1) * batch_size)\n yield slice_tensors(batch, data_slice)\n+ current_idx += 1\n+ GradientState._set_state(\"end_of_dataloader\", current_idx == len(self) - 1)", "from_author": false }, { "body": "For the scheduler, I'm not entirely sure how to proceed: we step for each process to account for Distributed training, not sure if we should step all the time or just when synchronizing gradients (since the length set for the scheduler probably does not account for gradient accumulation steps).", "diff_hunk": "@@ -52,18 +53,18 @@ def step(self, *args, **kwargs):\n for opt in self.optimizers:\n if opt.step_was_skipped:\n return\n-\n- if self.split_batches:\n- # Split batches -> the 
training dataloader batch size is not changed so one step per training step\n- self.scheduler.step(*args, **kwargs)\n- else:\n- # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do\n- # num_processes steps per training step\n- num_processes = AcceleratorState().num_processes\n- for _ in range(num_processes):\n- # Special case when using OneCycle and `drop_last` was not used\n- if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:\n- self.scheduler.step(*args, **kwargs)\n+ if self.gradient_state.sync_gradients:\n+ if self.split_batches:\n+ # Split batches -> the training dataloader batch size is not changed so one step per training step\n+ self.scheduler.step(*args, **kwargs)\n+ else:\n+ # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do\n+ # num_processes steps per training step\n+ num_processes = AcceleratorState().num_processes\n+ for _ in range(num_processes):\n+ # Special case when using OneCycle and `drop_last` was not used\n+ if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:\n+ self.scheduler.step(*args, **kwargs)", "from_author": false }, { "body": "Add two static methods to quickly set `sync_gradients` and `end_of_dataloader` (also wondering why you can't jsut set them directly on an instance? It should automatically update everything no?)", "diff_hunk": "@@ -209,3 +210,35 @@ def _check_initialized(self, mixed_precision=None, cpu=None):\n raise ValueError(err.format(flag=\"cpu=True\"))\n if mixed_precision is not None and mixed_precision != self.mixed_precision:\n raise ValueError(err.format(flag=f\"mixed_precision='{mixed_precision}'\"))\n+\n+\n+class GradientState:\n+ \"\"\"\n+ This is a variation of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all\n+ instance of `GradientState` share the same state, which is initialized on the first instantiation.\n+\n+ This specific state revolves around whether gradients should be synced and if we have reached the end of a prepared\n+ dataloader Attributes:\n+\n+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced\n+ - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader\n+ \"\"\"\n+\n+ _shared_state = {}\n+\n+ def __init__(self):\n+ self.__dict__ = self._shared_state\n+ if not getattr(self, \"initialized\", False):\n+ self.sync_gradients = True\n+ self.end_of_dataloader = False\n+ self.initialized = True\n+\n+ @staticmethod\n+ def _set_state(key, val):\n+ \"Sets `key` to `val` in GradientState\"\n+ if GradientState._shared_state != {} and key not in GradientState._shared_state.keys():\n+ raise KeyError(f\"{key} is not a valid key of `GradientState`, {GradientState._shared_state.keys()}\")\n+ GradientState._shared_state[key] = val\n+", "from_author": false }, { "body": "Resolved: we update every time. 
", "diff_hunk": "@@ -52,18 +53,18 @@ def step(self, *args, **kwargs):\n for opt in self.optimizers:\n if opt.step_was_skipped:\n return\n-\n- if self.split_batches:\n- # Split batches -> the training dataloader batch size is not changed so one step per training step\n- self.scheduler.step(*args, **kwargs)\n- else:\n- # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do\n- # num_processes steps per training step\n- num_processes = AcceleratorState().num_processes\n- for _ in range(num_processes):\n- # Special case when using OneCycle and `drop_last` was not used\n- if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:\n- self.scheduler.step(*args, **kwargs)\n+ if self.gradient_state.sync_gradients:\n+ if self.split_batches:\n+ # Split batches -> the training dataloader batch size is not changed so one step per training step\n+ self.scheduler.step(*args, **kwargs)\n+ else:\n+ # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do\n+ # num_processes steps per training step\n+ num_processes = AcceleratorState().num_processes\n+ for _ in range(num_processes):\n+ # Special case when using OneCycle and `drop_last` was not used\n+ if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:\n+ self.scheduler.step(*args, **kwargs)", "from_author": true }, { "body": "This is unnecessary complex. Why are you not doing `self.gradient_state._set_end_of_dataloader(True)` at the end of `iter`?", "diff_hunk": "@@ -294,12 +296,26 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n self.device = device\n self.rng_types = rng_types\n self.generator = generator\n+ self.gradient_state = GradientState()\n \n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n+ self.gradient_state._set_end_of_dataloader(False)\n+ _current_batch = 0\n+ _dataloader_length = len(self) - 1 if hasattr(self, \"__len__\") else False\n for batch in super().__iter__():\n+ if _dataloader_length:\n+ self.gradient_state._set_end_of_dataloader(_current_batch == _dataloader_length)\n+ _current_batch += 1\n+ else:\n+ self.gradient_state._set_end_of_dataloader(False)\n yield batch if self.device is None else send_to_device(batch, self.device)\n+ if not self.gradient_state.end_of_dataloader:\n+ self.gradient_state._set_end_of_dataloader(True)\n+ logger.warn(\n+ \"Warning! DataLoader had no length and finished iterating. Backwards pass and stepping must be manually performed one last time.\"\n+ )", "from_author": false }, { "body": "Not sure about this second test. 
If `step_with_optimizer=True` we then only step for sync gradients, which is not what we want.", "diff_hunk": "@@ -40,10 +47,11 @@ def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, spli\n self.scheduler = scheduler\n self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]\n self.split_batches = split_batches\n+ self.gradient_state = GradientState()\n self.step_with_optimizer = step_with_optimizer\n \n def step(self, *args, **kwargs):\n- if not self.step_with_optimizer:\n+ if not self.step_with_optimizer or not self.gradient_state.sync_gradients:", "from_author": false }, { "body": "The filter should be done in the scheduler, not the test scripts.", "diff_hunk": "@@ -12,41 +12,62 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import warnings\n from copy import deepcopy\n \n import torch\n import torch.nn.functional as F\n+from torch.optim import AdamW\n+from torch.optim.lr_scheduler import LambdaLR\n from torch.utils.data import DataLoader\n \n from accelerate import Accelerator\n from accelerate.test_utils import RegressionDataset, RegressionModel\n from accelerate.utils import DistributedType, set_seed\n \n \n-def step_model(model, input, target, accelerator):\n+warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"torch.optim.lr_scheduler\")", "from_author": false }, { "body": "This check has to happen as the last batch is being pushed out, otherwise it doesn't occur until outside the iteration loop. \r\n\r\nExample in python of what doesn't happen:\r\n\r\n```python\r\na = [0,1,2,3]\r\nclass A:\r\n def __init__(self,a):\r\n self.a,self.is_end = a,False\r\n def __iter__(self):\r\n for item in a: yield item\r\n self.is_end = True\r\naa = A(a)\r\nfor i,item in enumerate(aa):\r\n print(f'At end at {i}?: {aa.is_end}')\r\nprint(aa.is_end)\r\n```\r\nResult:\r\n```\r\nAt end at 0?: False\r\nAt end at 1?: False\r\nAt end at 2?: False\r\nAt end at 3?: False\r\nTrue\r\n```\r\nWhat we want to have happen:\r\n```python\r\nclass A:\r\n def __init__(self,a):\r\n self.a,self.is_end = a,False\r\n def __iter__(self):\r\n for i,item in enumerate(a):\r\n if i == len(a)-1:\r\n self.is_end = True \r\n yield item\r\n self.is_end = True\r\n```\r\n```\r\nAt end at 0?: False\r\nAt end at 1?: False\r\nAt end at 2?: False\r\nAt end at 3?: True\r\nTrue\r\n```\r\nSince we cannot check the length sometimes, this extra step + warning needs to be done.", "diff_hunk": "@@ -294,12 +296,26 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n self.device = device\n self.rng_types = rng_types\n self.generator = generator\n+ self.gradient_state = GradientState()\n \n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n+ self.gradient_state._set_end_of_dataloader(False)\n+ _current_batch = 0\n+ _dataloader_length = len(self) - 1 if hasattr(self, \"__len__\") else False\n for batch in super().__iter__():\n+ if _dataloader_length:\n+ self.gradient_state._set_end_of_dataloader(_current_batch == _dataloader_length)\n+ _current_batch += 1\n+ else:\n+ self.gradient_state._set_end_of_dataloader(False)\n yield batch if self.device is None else send_to_device(batch, self.device)\n+ if not self.gradient_state.end_of_dataloader:\n+ self.gradient_state._set_end_of_dataloader(True)\n+ logger.warn(\n+ \"Warning! DataLoader had no length and finished iterating. 
Backwards pass and stepping must be manually performed one last time.\"\n+ )", "from_author": true }, { "body": "I think there's a way to do this easier, will toy with it today. ", "diff_hunk": "@@ -294,12 +296,26 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n self.device = device\n self.rng_types = rng_types\n self.generator = generator\n+ self.gradient_state = GradientState()\n \n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n+ self.gradient_state._set_end_of_dataloader(False)\n+ _current_batch = 0\n+ _dataloader_length = len(self) - 1 if hasattr(self, \"__len__\") else False\n for batch in super().__iter__():\n+ if _dataloader_length:\n+ self.gradient_state._set_end_of_dataloader(_current_batch == _dataloader_length)\n+ _current_batch += 1\n+ else:\n+ self.gradient_state._set_end_of_dataloader(False)\n yield batch if self.device is None else send_to_device(batch, self.device)\n+ if not self.gradient_state.end_of_dataloader:\n+ self.gradient_state._set_end_of_dataloader(True)\n+ logger.warn(\n+ \"Warning! DataLoader had no length and finished iterating. Backwards pass and stepping must be manually performed one last time.\"\n+ )", "from_author": true }, { "body": "Much simpler way for us to iterate over the dataloader, and given we only move to device right before we yield there shouldn't be too much trouble with multiple batches in memory at one time. If so let me know.", "diff_hunk": "@@ -294,12 +296,27 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n self.device = device\n self.rng_types = rng_types\n self.generator = generator\n+ self.gradient_state = GradientState()\n \n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n- for batch in super().__iter__():\n- yield batch if self.device is None else send_to_device(batch, self.device)\n+ self.gradient_state._set_end_of_dataloader(False)\n+ dataloader_iter = super().__iter__()\n+ # We iterate one batch ahead to check when we are at the end\n+ current_batch = next(dataloader_iter)\n+ while True:\n+ try:\n+ # But we still move it to the device so it is done before `StopIteration` is reached\n+ if self.device is not None:\n+ current_batch = send_to_device(current_batch, self.device)\n+ next_batch = next(dataloader_iter)\n+ yield current_batch\n+ current_batch = next_batch\n+ except StopIteration:\n+ self.gradient_state._set_end_of_dataloader(True)\n+ yield current_batch\n+ break", "from_author": true }, { "body": "We should have a try except here too in case of an empty dataloader.", "diff_hunk": "@@ -294,12 +296,27 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n self.device = device\n self.rng_types = rng_types\n self.generator = generator\n+ self.gradient_state = GradientState()\n \n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n- for batch in super().__iter__():\n- yield batch if self.device is None else send_to_device(batch, self.device)\n+ self.gradient_state._set_end_of_dataloader(False)\n+ dataloader_iter = super().__iter__()\n+ # We iterate one batch ahead to check when we are at the end\n+ current_batch = next(dataloader_iter)", "from_author": false }, { "body": "Very clever!", "diff_hunk": "@@ -294,12 +296,27 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n self.device = device\n self.rng_types 
= rng_types\n self.generator = generator\n+ self.gradient_state = GradientState()\n \n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n- for batch in super().__iter__():\n- yield batch if self.device is None else send_to_device(batch, self.device)\n+ self.gradient_state._set_end_of_dataloader(False)\n+ dataloader_iter = super().__iter__()\n+ # We iterate one batch ahead to check when we are at the end\n+ current_batch = next(dataloader_iter)\n+ while True:\n+ try:\n+ # But we still move it to the device so it is done before `StopIteration` is reached\n+ if self.device is not None:\n+ current_batch = send_to_device(current_batch, self.device)\n+ next_batch = next(dataloader_iter)\n+ yield current_batch\n+ current_batch = next_batch\n+ except StopIteration:\n+ self.gradient_state._set_end_of_dataloader(True)\n+ yield current_batch\n+ break", "from_author": false }, { "body": "Small docstrings here would be great :-) (with a mention that it's private for a good reason and shouldn't be touched by the user directly!)", "diff_hunk": "@@ -209,3 +210,34 @@ def _check_initialized(self, mixed_precision=None, cpu=None):\n raise ValueError(err.format(flag=\"cpu=True\"))\n if mixed_precision is not None and mixed_precision != self.mixed_precision:\n raise ValueError(err.format(flag=f\"mixed_precision='{mixed_precision}'\"))\n+\n+\n+class GradientState:\n+ \"\"\"\n+ This is a variation of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all\n+ instance of `GradientState` share the same state, which is initialized on the first instantiation.\n+\n+ This specific state revolves around whether gradients should be synced and if we have reached the end of a prepared\n+ dataloader Attributes:\n+\n+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced\n+ - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader\n+ \"\"\"\n+\n+ _shared_state = {}\n+\n+ def __init__(self):\n+ self.__dict__ = self._shared_state\n+ if not getattr(self, \"initialized\", False):\n+ self.sync_gradients = True\n+ self.end_of_dataloader = False\n+ self.initialized = True\n+\n+ def __repr__(self):\n+ return f\"Sync Gradients: {self.sync_gradients}\\n\" f\"At end of current dataloader: {self.end_of_dataloader}\\n\"\n+\n+ def _set_sync_gradients(self, sync_gradients):\n+ self.sync_gradients = sync_gradients\n+\n+ def _set_end_of_dataloader(self, end_of_dataloader):\n+ self.end_of_dataloader = end_of_dataloader", "from_author": false }, { "body": "No we shouldn't raise here, just yield nothing. 
A training loop over an empty dataloader works, just doesn't do anything.\r\nWe can put a warning however, just in case the user didn't intend that.", "diff_hunk": "@@ -304,7 +304,10 @@ def __iter__(self):\n self.gradient_state._set_end_of_dataloader(False)\n dataloader_iter = super().__iter__()\n # We iterate one batch ahead to check when we are at the end\n- current_batch = next(dataloader_iter)\n+ try:\n+ current_batch = next(dataloader_iter)\n+ except StopIteration:\n+ raise StopIteration(\"Tried iterating over an empty dataloader\")", "from_author": false }, { "body": "```suggestion\r\nThis will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should\r\n```", "diff_hunk": "@@ -38,4 +38,23 @@ should search for and replace by the corresponding methods of your `accelerator`\n - Use [`~Accelerator.clip_grad_norm_`] instead of `torch.nn.utils.clip_grad_norm_` and\n [`~Accelerator.clip_grad_value_`] instead of `torch.nn.utils.clip_grad_value_`.\n \n+To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`. \n+This will also automatically ensure the gradients are synced or unsynced when on multi-node, check if the step should", "from_author": false }, { "body": "Use the proper syntax for a warning here\r\n```suggestion\r\n<Tip warning={true}>\r\n\r\nUsing this with `dispatch_batches=True` (which is the default for iterable datasets) is currently not supported.\r\n\r\n</Tip>\r\n```", "diff_hunk": "@@ -38,4 +38,23 @@ should search for and replace by the corresponding methods of your `accelerator`\n - Use [`~Accelerator.clip_grad_norm_`] instead of `torch.nn.utils.clip_grad_norm_` and\n [`~Accelerator.clip_grad_value_`] instead of `torch.nn.utils.clip_grad_value_`.\n \n+To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`. \n+This will also automatically ensure the gradients are synced or unsynced when on multi-node, check if the step should\n+actually be performed, and auto-scale the loss:\n+\n+```python\n+accelerator = Accelerator(gradient_accumulation_steps=2)\n+\n+for (input, label) in enumerate(training_dataloader):\n+ with accelerator.accumulate(model):\n+ predictions = model(input)\n+ loss = loss_function(predictions, labels)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ scheduler.step()\n+ optimizer.zero_grad()\n+```\n+\n+**Note**: Using this with `dispatch_batches=True` is currently not supported.", "from_author": false }, { "body": "```suggestion\r\n gradient_accumulation_steps (`int`, *optional*, default to 1):\r\n```", "diff_hunk": "@@ -92,6 +92,9 @@ class Accelerator:\n default to the value in the environment variable `MIXED_PRECISION`, which will use the default value in the\n accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp16'\n requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n+ gradient_accumulation_steps (`int`, *optional*):", "from_author": false }, { "body": "```suggestion\r\n The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with\r\n```", "diff_hunk": "@@ -92,6 +92,9 @@ class Accelerator:\n default to the value in the environment variable `MIXED_PRECISION`, which will use the default value in the\n accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp16'\n requires pytorch 1.6 or higher. 
'bf16' requires pytorch 1.10 or higher.\n+ gradient_accumulation_steps (`int`, *optional*):\n+ The number of steps that should pass before gradients are accumulated. Should be combined with", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/484", "pr_id": 983930046 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 01dc0439a..a24428c33 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n- \"datasets\",\n+ \"datasets<=2.2.2\",\n \"evaluate\",\n \"transformers\",\n \"scipy\",\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/481", "pr_id": 981633392 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 3b8714911..6880f4626 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -136,7 +136,7 @@ def training_function(config, args):\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n batch_size = MAX_GPU_BATCH_SIZE\n \ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex f1f54d78d..e380d2d85 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -143,7 +143,7 @@ def training_function(config, args):\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n batch_size = MAX_GPU_BATCH_SIZE\n \ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 4f0311785..2d1cf2f95 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -140,7 +140,7 @@ def tokenize_function(examples):\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n batch_size = MAX_GPU_BATCH_SIZE\n \ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex d0a044bcd..b69978023 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -111,6 +111,10 @@ def training_function(config, args):\n batch_size = int(config[\"batch_size\"])\n # New Code #\n gradient_accumulation_steps = int(args.gradient_accumulation_steps)\n+ if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:\n+ raise NotImplementedError(\n+ \"Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`\"\n+ )\n \n metric = evaluate.load(\"glue\", \"mrpc\")\n \n@@ -151,6 +155,7 @@ def training_function(config, args):\n # We use the new `no_sync` context manager to prevent gradient averaging\n # until we want to at the proper step if we happen to be in a distributed setup\n # otherwise it does nothing\n+ # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.\n if step % gradient_accumulation_steps != 0:\n # Accumulate gradients locally\n with accelerator.no_sync(model):\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 7aef8e1ff..65feed51c 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -119,7 +119,7 @@ def training_function(config, args):\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n batch_size = MAX_GPU_BATCH_SIZE\n \ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex 788c54f50..c9445ee81 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -121,7 +121,7 @@ def training_function(config, args):\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n batch_size = MAX_GPU_BATCH_SIZE\n \ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 3b7b4c8d4..ef2b1388b 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -131,7 +131,7 @@ def training_function(config, args):\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n batch_size = MAX_GPU_BATCH_SIZE\n \ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex f13fdfd27..0433ed9a1 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -104,7 +104,7 @@ def tokenize_function(examples):\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n batch_size = MAX_GPU_BATCH_SIZE\n \ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 00713e886..55dedae5e 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -105,7 +105,7 @@ def training_function(config, args):\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\n gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n batch_size = MAX_GPU_BATCH_SIZE\n \n", "code_comments": [ { "body": "No, you 
should raise an error at the beginning if on TPU with a gradient accumulation steps argument passed > 1.", "diff_hunk": "@@ -110,7 +110,10 @@ def training_function(config, args):\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n # New Code #\n- gradient_accumulation_steps = int(args.gradient_accumulation_steps)\n+ if accelerator.distributed_type != DistributedType.TPU:\n+ gradient_accumulation_steps = int(args.gradient_accumulation_steps)\n+ else:\n+ gradient_accumulation_steps = 1", "from_author": false }, { "body": "Don't need to change here since `gradient_accumulation_steps` will always be 1.", "diff_hunk": "@@ -196,7 +196,8 @@ def collate_fn(examples):\n if args.with_tracking:\n total_loss += loss.detach().float()\n accelerator.backward(loss)\n- if step % gradient_accumulation_steps == 0:\n+ # We currently do not support nor recommend gradient accumulation on the TPU as bugs were found on the XLA side when running our tests\n+ if step % gradient_accumulation_steps == 0 or accelerator.distributed_type == DistributedType.TPU:", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -146,7 +146,8 @@ def training_function(config, args):\n loss = outputs.loss\n loss = loss / gradient_accumulation_steps\n accelerator.backward(loss)\n- if step % gradient_accumulation_steps == 0:\n+ # We currently do not support nor recommend gradient accumulation on the TPU as bugs were found on the XLA side when running our tests\n+ if step % gradient_accumulation_steps == 0 or accelerator.distributed_type == DistributedType.TPU:", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/479", "pr_id": 981566884 }, { "diff": "diff --git a/setup.py b/setup.py\nindex b48dc63ad..01dc0439a 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n- \"datasets<=2.3.1\",\n+ \"datasets\",\n \"evaluate\",\n \"transformers\",\n \"scipy\",\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/478", "pr_id": 981465446 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 01dc0439a..b48dc63ad 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n- \"datasets\",\n+ \"datasets<=2.3.1\",\n \"evaluate\",\n \"transformers\",\n \"scipy\",\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Looks like the PR with the fix will be merged soon though? 
But feel free to pin if you want in the meantime :-)", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/477", "pr_id": 981430359 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 4f0311785..421a79d6b 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -112,11 +112,8 @@ def training_function(config, args):\n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n if accelerator.is_main_process:\n- run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n- accelerator.print(run)\n- accelerator.init_trackers(run, config)\n+ experiment_config = vars(args)\n+ accelerator.init_trackers(\"fsdp_glue_no_trainer\", experiment_config)\n \n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n datasets = load_dataset(\"glue\", \"mrpc\")\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 1b3fa27e9..99afddeb8 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -188,8 +188,8 @@ def __init__(\n deepspeed_plugin.set_deepspeed_weakref()\n \n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" or isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\n- if is_torch_version(\"<\", \"1.12.0.dev20220418+cu113\"):\n- raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")\n+ if is_torch_version(\"<\", \"1.12.0\"):\n+ raise ValueError(\"FSDP requires PyTorch >= 1.12.0\")\n \n if fsdp_plugin is None: # init from env variables\n fsdp_plugin = FullyShardedDataParallelPlugin() if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" else None\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 3ccf2012b..28fc3826d 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -442,7 +442,7 @@ class FullyShardedDataParallelPlugin:\n \n def __post_init__(self):\n from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, ShardingStrategy\n- from torch.distributed.fsdp.wrap import default_auto_wrap_policy\n+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy\n \n if self.sharding_strategy is None:\n self.sharding_strategy = ShardingStrategy(int(os.environ.get(\"FSDP_SHARDING_STRATEGY\", 1)))\n@@ -458,4 +458,6 @@ def __post_init__(self):\n \n if self.auto_wrap_policy is None:\n if self.min_num_params > 0:\n- self.auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=self.min_num_params)\n+ self.auto_wrap_policy = functools.partial(\n+ size_based_auto_wrap_policy, min_num_params=self.min_num_params\n+ )\n", "code_comments": [], "context": [ { "body": "@pacman100 I'd recommend perhaps doing a version check to see what version of torch we're on to import one vs the other?", "from_author": false }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "> @pacman100 I'd recommend perhaps doing a version check to see what version of torch we're on to import one vs the other?\r\n\r\nDone. 
As the FSDP support needs torch nightlies, just bumping up the version is viable.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/475", "pr_id": 981255542 }, { "diff": "diff --git a/docs/source/accelerator.mdx b/docs/source/accelerator.mdx\nindex 990b75443..6e7a93a3d 100644\n--- a/docs/source/accelerator.mdx\n+++ b/docs/source/accelerator.mdx\n@@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.\n # Accelerator\n \n The [`Accelerator`] is the main class provided by πŸ€— Accelerate. It serves at the main entrypoint for\n-the API. To quickly adapt your script to work on any kind of setup with πŸ€— Accelerate juste:\n+the API. To quickly adapt your script to work on any kind of setup with πŸ€— Accelerate just:\n \n 1. Initialize an [`Accelerator`] object (that we will call `accelerator` in the rest of this\n page) as early as possible in your script.\n@@ -21,10 +21,10 @@ the API. To quickly adapt your script to work on any kind of setup with πŸ€— Acc\n 3. (Optional but best practice) Remove all the `.cuda()` or `.to(device)` in your code and let the\n `accelerator` handle device placement for you.\n 4. Replace the `loss.backward()` in your code by `accelerator.backward(loss)`.\n-5. (Optional, when using distributed evaluation) Gather your predictions and labelsbefore storing them or using them\n- for metric computation using [`~Accelerator.gather`].\n+5. (Optional, when using distributed evaluation) Gather your predictions and labels before storing them or using\n+ them for metric computation using [`~Accelerator.gather`].\n \n-This is all what is needed in most cases. For more advanced case or a nicer experience here are the functions you\n+This is all that is needed in most cases. For more advanced cases or a nicer experience here are the functions you\n should search for and replace by the corresponding methods of your `accelerator`:\n \n - `print` statements should be replaced by [`~Accelerator.print`] to be only printed once per\ndiff --git a/docs/source/big_modeling.mdx b/docs/source/big_modeling.mdx\nindex 4ca316870..a5cb3d4ed 100644\n--- a/docs/source/big_modeling.mdx\n+++ b/docs/source/big_modeling.mdx\n@@ -27,7 +27,7 @@ In plain English, those steps are:\n 2. Load the model weights (in a dictionary usually called a state dict) from the disk\n 3. Load those weights inside the model\n \n-While this works very well for regularly sized models, this workflow has some clear limitation when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pretrained weights. If you're loading a model with 6 billions parameters, this needs you will need 24GB of RAM for each copy of the model, so 48GB in total (half of it to load the model in FP16).\n+While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pretrained weights. 
If you're loading a model with 6 billions parameters, this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of it to load the model in FP16).\n \n <Tip warning={true}>\n \n@@ -37,7 +37,7 @@ This API is quite new and still in its experimental stage. While we strive to pr\n \n ## Instantiating an empty model\n \n-The first tool Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM, so that step 1 can be done on models of any size. Here is how it works:\n+The first tool πŸ€— Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM, so that step 1 can be done on models of any size. Here is how it works:\n \n ```py\n from accelerate import init_empty_weights\n@@ -65,7 +65,7 @@ You can't move a model initialized like this on CPU or another device directly,\n \n It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, this is more memory available to store your model. In this case, it's better if your checkpoint is split in several smaller files that we call checkpoint shards.\n \n-Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. For instance we could have a folder containing:\n+πŸ€— Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. For instance we could have a folder containing:\n \n ```bash\n first_state_dict.bin\n@@ -88,7 +88,7 @@ and `first_state_dict.bin` containing the weights for `\"linear1.weight\"` and `\"l\n \n ## Loading weights\n \n-The second tool Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.\n+The second tool πŸ€— Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.\n \n Here is how we can use this to load the [GPT-J-6B](https://huggingface.co/EleutherAI/gpt-j-6B) model. 
You clone the sharded version of this model with:\n \n@@ -122,14 +122,14 @@ model = load_checkpoint_and_dispatch(\n )\n ```\n \n-By passing `device_map=\"auto\"`, we tell Accelerate to determine automatically where to put each layer of the model depending on the available resources:\n+By passing `device_map=\"auto\"`, we tell πŸ€— Accelerate to determine automatically where to put each layer of the model depending on the available resources:\n - first we use the maximum space available on the GPU(s)\n - if we still need space, we store the remaining weights on the CPU\n - if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors\n \n `no_split_module_classes=[\"GPTJBlock\"]` indicates that the modules that are `GPTJBlock` should not be split on different devices. You should set here all blocks that include a residual connection of some kind.\n \n-You can see the `device_map` that Accelerate picked by accessing the `hf_device_map` attribute of your model:\n+You can see the `device_map` that πŸ€— Accelerate picked by accessing the `hf_device_map` attribute of your model:\n \n ```py\n model.hf_device_map\n@@ -190,7 +190,7 @@ output = model.generate(inputs[\"input_ids\"])\n tokenizer.decode(output[0].tolist())\n ```\n \n-Behind the scenes, Accelerate added hooks to the model, so that:\n+Behind the scenes, πŸ€— Accelerate added hooks to the model, so that:\n - at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works)\n - for the weights offloaded on the CPU, they are put on a GPU just before the forward pass, and cleaned up just after\n - for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass, and cleaned up just after\n@@ -207,7 +207,7 @@ This only supports inference of your model, not training. Most of the computatio\n \n We are aware of the current limitations in the API:\n \n-- While this could theoretically work just one CPU with potential disk offload, you need at least one GPU to run this API. This will be fixed in further development.\n+- While this could theoretically work on just one CPU with potential disk offload, you need at least one GPU to run this API. This will be fixed in further development.\n - [`infer_auto_device_map`] (or `device_map=\"auto\"` in [`load_checkpoint_and_dispatch`]) tries to maximize GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), it's not entirely true with Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. 
Move a few modules to the disk device if you get crashes due to lack of RAM.\n - [`infer_auto_device_map`] (or `device_map=\"auto\"` in [`load_checkpoint_and_dispatch`]) attributes devices sequentially (to avoid moving things back and forth) so if your first layer is bigger than the size of the GPU you have, it will end up with everything on the CPU/Disk.\n - [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.\ndiff --git a/docs/source/checkpoint.mdx b/docs/source/checkpoint.mdx\nindex 26ef21150..c818c9847 100644\n--- a/docs/source/checkpoint.mdx\n+++ b/docs/source/checkpoint.mdx\n@@ -12,8 +12,8 @@ specific language governing permissions and limitations under the License.\n \n # Checkpointing\n \n-When training a PyTorch model with Accelerate, you may often want to save and continue a state of training. Doing so requires\n-saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside Accelerate are two convience functions to achieve this quickly:\n+When training a PyTorch model with πŸ€— Accelerate, you may often want to save and continue a state of training. Doing so requires\n+saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside πŸ€— Accelerate are two convience functions to achieve this quickly:\n - Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location\n - Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`\n \n@@ -57,4 +57,4 @@ for epoch in range(num_epochs):\n \n # Restore previous state\n accelerate.load_state(\"my/save/path\")\n-```\n\\ No newline at end of file\n+```\ndiff --git a/docs/source/fsdp.mdx b/docs/source/fsdp.mdx\nindex e2a0210c8..32f982786 100644\n--- a/docs/source/fsdp.mdx\n+++ b/docs/source/fsdp.mdx\n@@ -18,7 +18,7 @@ To read more about it and the benefits, check out the [Fully Sharded Data Parall\n We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature.\n All you need to do is enable it through the config.\n \n-## How it works out the box\n+## How it works out of the box\n \n On your machine(s) just run:\n \n@@ -57,7 +57,7 @@ use_cpu: false\n accelerate launch examples/nlp_example.py\n ```\n \n-Currently, `Accelerate` supports following config through the CLI:\n+Currently, `Accelerate` supports the following config through the CLI:\n \n ```bash\n `Sharding Strategy`: [1] FULL_SHARD, [2] SHARD_GRAD_OP\n@@ -65,11 +65,11 @@ Currently, `Accelerate` supports following config through the CLI:\n `Offload Params`: Decides Whether to offload parameters and gradients to CPU.\n ```\n \n-## Few caveats to be aware of\n+## A few caveats to be aware of\n \n - PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.\n Due to this, any optimizer created before model wrapping gets broken and occupies more memory.\n- Hence, it is highly recommended and efficient to prepare model before creating optimizer.\n+ Hence, it is highly recommended and efficient to prepare the model before creating the optimizer.\n `Accelerate` will automatically wrap the model and create an optimizer for you in case of single model with a warning message.\n > FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before 
creating the optimizer\n \n@@ -91,14 +91,14 @@ optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n \n ```\n \n-- In case of a single model, if you have created optimizer with multiple parameter groups and called prepare with them together,\n+- In case of a single model, if you have created the optimizer with multiple parameter groups and called prepare with them together,\n then the parameter groups will be lost and the following warning is displayed:\n > FSDP Warning: When using FSDP, several parameter groups will be conflated into\n > a single one due to nested module wrapping and parameter flattening.\n \n- This is because parameter groups created before wrapping will have no meaning post wrapping due parameter flattening of nested FSDP modules into 1D arrays (which can consume many layers).\n- For instance, below are the named parameters of FSDP model on GPU 0 (When using 2 GPUs. Around 55M (110M/2) params in 1D arrays as this will have the 1st shard of the parameters). \n- Here, if one has applied no weight decay for [bias, LayerNorm.weight] named parameters of unwrapped BERT model, \n+ This is because parameter groups created before wrapping will have no meaning post wrapping due to parameter flattening of nested FSDP modules into 1D arrays (which can consume many layers).\n+ For instance, below are the named parameters of an FSDP model on GPU 0 (When using 2 GPUs. Around 55M (110M/2) params in 1D arrays as this will have the 1st shard of the parameters). \n+ Here, if one has applied no weight decay for [bias, LayerNorm.weight] the named parameters of an unwrapped BERT model, \n it can't be applied to the below FSDP wrapped model as there are no named parameters with either of those strings and \n the parameters of those layers are concatenated with parameters of various other layers.\n ```\n@@ -110,7 +110,7 @@ optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n ```\n \n \n-- In case of multiple models, it is necessary to prepare the models before creating optimizers else it will throw an error.\n+- In case of multiple models, it is necessary to prepare the models before creating optimizers or else it will throw an error.\n - Mixed precision is currently not supported with FSDP.\n \n For more control, users can leverage the `FullyShardedDataParallelPlugin` wherein they can specify `auto_wrap_policy`, `backward_prefetch` and `ignored_modules`.\ndiff --git a/docs/source/index.mdx b/docs/source/index.mdx\nindex 9d9000097..16593d554 100644\n--- a/docs/source/index.mdx\n+++ b/docs/source/index.mdx\n@@ -12,16 +12,16 @@ specific language governing permissions and limitations under the License.\n \n # Accelerate\n \n-Run your *raw* PyTorch training script on any kind of device\n+Run your *raw* PyTorch training script on any kind of device.\n \n ## Features\n \n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then runs\n+- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and in any kind of distributed\n+ setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. 
The same code can then run\n seamlessly on your local machine for debugging or your training environment.\n \n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment then\n- launch the scripts.\n+- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment and\n+ then launch the scripts.\n \n \n ## Easy to integrate\ndiff --git a/docs/source/installation.mdx b/docs/source/installation.mdx\nindex be8394af7..26c2698aa 100644\n--- a/docs/source/installation.mdx\n+++ b/docs/source/installation.mdx\n@@ -57,7 +57,7 @@ pip install git+https://github.com/huggingface/accelerate\n \n Note that this will install not the latest released version, but the bleeding edge `main` version, which you may want to use in case a bug has been fixed since the last official release and a new release hasn't been yet rolled out.\n \n-While we strive to keep `main` operational at all times, if you notice some issues, they usually get fixed within a few hours or a day and and you're more than welcome to help us detect any problems by opening an [Issue](https://github.com/huggingface/accelerate/issues) and this way, things will get fixed even sooner.\n+While we strive to keep `main` operational at all times, if you notice some issues, they usually get fixed within a few hours or a day and you're more than welcome to help us detect any problems by opening an [Issue](https://github.com/huggingface/accelerate/issues) and this way, things will get fixed even sooner.\n \n Again, you can run:\n \n@@ -85,7 +85,7 @@ now this editable install will reside where you clone the folder to, e.g. `~/acc\n \n Do note that you have to keep that `accelerate` folder around and not delete it to continue using the πŸ€— Accelerate library.\n \n-Now, let's get to the real benefit of this installation approach. Say, you saw some new feature has been just committed into `main`. If you have already performed all the steps above, to update your accelerate repo to include all the latest commits, all you need to do is to `cd` into that cloned repository folder and update the clone to the latest version:\n+Now, let's get to the real benefit of this installation approach. Say, you saw some new feature just has been committed into `main`. If you have already performed all the steps above, to update your accelerate repo to include all the latest commits, all you need to do is to `cd` into that cloned repository folder and update the clone to the latest version:\n \n ```bash\n cd ~/accelerate/\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex 1c62f05f5..2e9cbe203 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.\n \n # Quick tour\n \n-Let's have a look at a look at πŸ€— Accelerate main features and traps to avoid.\n+Let's have a look at the πŸ€— Accelerate main features and traps to avoid.\n \n ## Main use\n \n@@ -54,7 +54,7 @@ model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n )\n ```\n \n-In particular, your training dataloader will be sharded accross all GPUs/TPU cores available so that each one sees a\n+In particular, your training dataloader will be sharded across all GPUs/TPU cores available so that each one sees a\n different portion of the training dataset. 
Also, the random states of all processes will be synchronized at the\n beginning of each iteration through your dataloader, to make sure the data is shuffled the same way (if you decided to\n use `shuffle=True` or any kind of random sampler).\n@@ -118,7 +118,7 @@ method:\n validation_dataloader = accelerator.prepare(validation_dataloader)\n ```\n \n-Like for your training dataloader, it will mean that (should you run your script on multiple devices) each device will\n+As for your training dataloader, it will mean that (should you run your script on multiple devices) each device will\n only see part of the evaluation data. This means you will need to group your predictions together. This is very easy to\n do with the [`~Accelerator.gather`] method.\n \n@@ -134,8 +134,8 @@ for inputs, targets in validation_dataloader:\n \n <Tip warning={true}>\n \n-Like for the training dataloader, passing your validation dataloader through\n-[`~Accelerator.prepare`] may change its: if you run on X GPUs, it will have its length divided by X\n+As for the training dataloader, passing your validation dataloader through\n+[`~Accelerator.prepare`] may change it: if you run on X GPUs, it will have its length divided by X\n (since your actual batch size will be multiplied by X), unless you set `split_batches=True`.\n \n Any instruction using your training dataloader length (for instance if you need the number of total training steps\n@@ -159,7 +159,7 @@ PyTorch), they are fully compatible with πŸ€— Accelerate. The only caveat here i\n to determine all useful information, so `torch.distributed.launch` should be used with the flag `--use_env`.\n \n πŸ€— Accelerate also provides a CLI tool that unifies all launcher, so you only have to remember one command. To use it,\n-just run\n+just run:\n \n ```bash\n accelerate config\n@@ -175,7 +175,7 @@ on your machine and reply to the questions asked. This will save a *default_conf\n \n You can also specify with the flag `--config_file` the location of the file you want to save.\n \n-Once this is done, you can test everything is going well on your setup by running\n+Once this is done, you can test everything is going well on your setup by running:\n \n ```bash\n accelerate test\n@@ -235,14 +235,14 @@ step). This is why your first step of training will always be very long as build\n optimizations takes some time.\n \n The good news is that this compilation will be cached so the second step and all the following will be much faster. The\n-bas news is that it only applies if all of your steps do exactly the same operations, which implies:\n+bad news is that it only applies if all of your steps do exactly the same operations, which implies:\n \n - having all tensors of the same length in all your lengths\n - having static code (i.e., not a for loop of length that could change from step to step)\n \n Having any of the things above change between two steps will trigger a new compilation which will, once again, take a\n lot of time. 
In practice, that means you must take special care to have all your tensors in your inputs of the same\n-shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layer with for loops that\n+shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layers with for loops that\n have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.\n \n To introduce special behavior in your script for TPUs you can check the `distributed_type` of your\n@@ -257,10 +257,10 @@ else:\n # go crazy and be dynamic\n ```\n \n-The [NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) shows an example in\n+The [NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) shows an example in a \n situation with dynamic padding.\n \n-One last thing to pay close attnetion to: if your model has tied weights (such as language models which tie the weights\n+One last thing to pay close attention to: if your model has tied weights (such as language models which tie the weights\n of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you\n passed your model to [`~Accelerator.prepare`]) will break the tying. You will need to retie the weights\n after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in\n@@ -317,8 +317,8 @@ following line in your code:\n accelerator.wait_for_everyone()\n ```\n \n-This instruction will block all the processes that arrive them first until all the other processes have reached that\n-point (if you run your script on just one GPU or CPU, this wont' do anything).\n+This instruction will block all the processes that arrive first until all the other processes have reached that\n+point (if you run your script on just one GPU or CPU, this won't do anything).\n \n \n ### Saving/loading a model\n@@ -338,7 +338,7 @@ unwrapped_model = accelerator.unwrap_model(model)\n accelerator.save(unwrapped_model.state_dict(), filename)\n ```\n \n-If your script contains a logic to load checkpoint, we also recommend you load your weights in the unwrapped model\n+If your script contains logic to load a checkpoint, we also recommend you load your weights in the unwrapped model\n (this is only useful if you use the load function after making your model go through\n [`~Accelerator.prepare`]). Here is an example:\n \n@@ -368,7 +368,7 @@ and `accelerator.clip_grad_value_` respectively.\n \n ### Mixed Precision training\n \n-If you are running your training in Mixed Precision with Accelerate, you will get the best result with your loss being\n+If you are running your training in Mixed Precision with πŸ€— Accelerate, you will get the best result with your loss being\n computed inside your model (like in Transformer models for instance). Every computation outside of the model will be\n executed in full precision (which is generally what you want for loss computation, expecially if it involves a\n softmax). 
However you might want to put your loss computation inside the *accelerator.autocast* context manager:\n@@ -438,14 +438,14 @@ The random number generator synchronization will by default synchronize:\n - the main random number generator in PyTorch <=1.5.1\n \n You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main\n-[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on local `generator` to avoid\n+[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid\n setting the same seed in the main random number generator in all processes.\n \n <Tip warning={true}>\n \n-Synchronization the main torch (or CUDA or XLA) random number generator will affect any other potential random\n-artifacts you could have in your dataset (like random data augmentation) in the sense all processes will get the\n-same random numbers from the torch random modules (so will apply the same random data augmentation if it's\n+Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random\n+artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get\n+the same random numbers from the torch random modules (so will apply the same random data augmentation if it's\n controlled by torch).\n \n </Tip>\n@@ -457,4 +457,4 @@ The randomization part of your custom sampler, batch sampler or iterable dataset\n \n </Tip>\n \n-See more details about the internal in the [Internals page](internal).\n+For more details about the internals, see the [Internals page](internal).\ndiff --git a/docs/source/sagemaker.mdx b/docs/source/sagemaker.mdx\nindex d55ccd28b..563f0c742 100644\n--- a/docs/source/sagemaker.mdx\n+++ b/docs/source/sagemaker.mdx\n@@ -23,7 +23,7 @@ make it easier than ever to train Hugging Face Transformer models in [Amazon Sag\n Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not\n have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html).\n \n-After you have your AWS Account you need to install the `sagemaker` sdk for πŸ€— Accelerate with.\n+After you have your AWS Account you need to install the `sagemaker` sdk for πŸ€— Accelerate with:\n \n ```bash\n pip install \"accelerate[sagemaker]\" --upgrade\n@@ -31,7 +31,7 @@ pip install \"accelerate[sagemaker]\" --upgrade\n \n πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. πŸ€—\n Accelerate is not in the DLC yet (will soon be added!) 
so to use it within Amazon SageMaker you need to create a\n-`requirements.txt` in the same directory where your training script is located and add it as dependency.\n+`requirements.txt` in the same directory where your training script is located and add it as dependency:\n \n ```\n accelerate\n@@ -43,7 +43,7 @@ You should also add any other dependencies you have to this `requirements.txt`.\n ### Configure πŸ€— Accelerate\n \n You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with\n-the πŸ€— Accelerate CLI.\n+the πŸ€— Accelerate CLI:\n \n ```bash\n accelerate config\n@@ -62,7 +62,7 @@ accelerate config\n \n The training script is very similar to a training script you might run outside of SageMaker, but to save your model\n after training you need to specify either `/opt/ml/model` or use `os.environ[\"SM_MODEL_DIR\"]` as your save\n-directory. After training, artifacts in this directory are uploaded to S3.\n+directory. After training, artifacts in this directory are uploaded to S3:\n \n \n ```diff\n@@ -79,7 +79,7 @@ specify type as bool in your script and provide an explicit True or False value\n \n ### Launch Training\n \n-You can launch your training with πŸ€— Accelerate CLI with\n+You can launch your training with πŸ€— Accelerate CLI with:\n \n ```\n accelerate launch path_to_script.py --args_to_the_script\ndiff --git a/docs/source/tracking.mdx b/docs/source/tracking.mdx\nindex 6d4ca437c..25e44b3c9 100644\n--- a/docs/source/tracking.mdx\n+++ b/docs/source/tracking.mdx\n@@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.\n # Tracking\n \n There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex.\n-Accelerate provides a general tracking API that can be used to log useful items during your script through [`~Accelerator.log`]\n+πŸ€— Accelerate provides a general tracking API that can be used to log useful items during your script through [`~Accelerator.log`]\n \n ## Integrated Trackers\n \ndiff --git a/docs/source/utilities.mdx b/docs/source/utilities.mdx\nindex f5756a262..bfcaeb60e 100644\n--- a/docs/source/utilities.mdx\n+++ b/docs/source/utilities.mdx\n@@ -12,11 +12,11 @@ specific language governing permissions and limitations under the License.\n \n # Helpful Utilities\n \n-Below are a variety of utility functions that Accelerate provides, broken down by use-case. \n+Below are a variety of utility functions that πŸ€— Accelerate provides, broken down by use-case. 
\n \n ## Data Classes\n \n-These are basic dataclasses used throughout Accelerate and can be passed in as parameters.\n+These are basic dataclasses used throughout πŸ€— Accelerate and they can be passed in as parameters.\n \n [[autodoc]] utils.DistributedType\n \n@@ -56,7 +56,7 @@ These functionalities check the state of the current working environment includi\n \n [[autodoc]] utils.write_basic_config\n \n-When setting up Accelerate for the first time, rather than running `accelerate config` [~utils.write_basic_config] can be used as alternative for quick configuration.\n+When setting up πŸ€— Accelerate for the first time, rather than running `accelerate config` [~utils.write_basic_config] can be used as an alternative for quick configuration.\n \n ## Modeling\n \n@@ -88,4 +88,4 @@ These utilities relate to setting and synchronizing of all the random states.\n \n [[autodoc]] utils.synchronize_rng_state\n \n-[[autodoc]] utils.synchronize_rng_states\n\\ No newline at end of file\n+[[autodoc]] utils.synchronize_rng_states\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/472", "pr_id": 979186035 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 2d2a0bd56..1b3fa27e9 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -68,7 +68,7 @@\n DummyScheduler,\n )\n \n-if is_tpu_available():\n+if is_tpu_available(check_device=False):\n import torch_xla.distributed.xla_multiprocessing as xmp\n \n logger = get_logger(__name__)\ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex 8b41c48f8..d5e816aab 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -33,7 +33,7 @@\n )\n \n \n-if is_tpu_available():\n+if is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n \n from .logging import get_logger\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex eb26c3a60..806fb7ddc 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -34,7 +34,7 @@\n )\n \n \n-if is_tpu_available():\n+if is_tpu_available(check_device=False):\n import torch_xla.distributed.parallel_loader as xpl\n \n \ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 3d5977968..046d5ce5b 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -21,7 +21,7 @@\n from .utils import DistributedType, honor_type, is_torch_version, is_tpu_available\n \n \n-if is_tpu_available():\n+if is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n \n \ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 36b11be56..63527124c 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -20,7 +20,7 @@\n from .utils import DistributedType, is_ccl_available, is_deepspeed_available, is_tpu_available\n \n \n-if is_tpu_available():\n+if is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n \n \n@@ -88,7 +88,7 @@ def __init__(\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\n \"before using any functionality from the `accelerate` library.\"\n )\n- elif is_tpu_available() and not cpu:\n+ if is_tpu_available() and not cpu:\n self.distributed_type = DistributedType.TPU\n self.num_processes = 
xm.xrt_world_size()\n self.process_index = xm.get_ordinal()\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex 594f7aa1c..a660db858 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -38,12 +38,7 @@\n try:\n import torch_xla.core.xla_model as xm # noqa: F401\n \n- try:\n- # Will raise a RuntimeError if no XLA configuration is found\n- _ = xm.xla_device()\n- _tpu_available = True\n- except RuntimeError:\n- _tpu_available = False\n+ _tpu_available = True\n except ImportError:\n _tpu_available = False\n \n@@ -56,8 +51,15 @@ def is_apex_available():\n return importlib.util.find_spec(\"apex\") is not None\n \n \n-def is_tpu_available():\n- \"Checks if `torch_xla` is installed and if a TPU is in the environment\"\n+def is_tpu_available(check_device=True):\n+ \"Checks if `torch_xla` is installed and potentially if a TPU is in the environment\"\n+ if _tpu_available and check_device:\n+ try:\n+ # Will raise a RuntimeError if no XLA configuration is found\n+ _ = xm.xla_device()\n+ return True\n+ except RuntimeError:\n+ return False\n return _tpu_available\n \n \ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex 954bb5dfd..46753513e 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -29,7 +29,7 @@\n from .versions import is_torch_version\n \n \n-if is_tpu_available():\n+if is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n \n \ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\nindex 0d4f5b7e2..206c70589 100644\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -28,7 +28,7 @@\n if is_deepspeed_available():\n from deepspeed import DeepSpeedEngine\n \n-if is_tpu_available():\n+if is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n \n \ndiff --git a/src/accelerate/utils/random.py b/src/accelerate/utils/random.py\nindex 3958022a4..51b7c3719 100644\n--- a/src/accelerate/utils/random.py\n+++ b/src/accelerate/utils/random.py\n@@ -23,7 +23,7 @@\n from .imports import is_tpu_available\n \n \n-if is_tpu_available():\n+if is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n \n \n", "code_comments": [ { "body": "Mmm, removing this will break the other places we use `is_tpu_available`", "diff_hunk": "@@ -38,12 +38,7 @@\n try:\n import torch_xla.core.xla_model as xm # noqa: F401\n \n- try:\n- # Will raise a RuntimeError if no XLA configuration is found\n- _ = xm.xla_device()\n- _tpu_available = True\n- except RuntimeError:\n- _tpu_available = False", "from_author": false }, { "body": "The added reason was specifically inside `AcceleratorState`, at the proposed location. But having it as an argument instead works as well. 
Will refactor", "diff_hunk": "@@ -38,12 +38,7 @@\n try:\n import torch_xla.core.xla_model as xm # noqa: F401\n \n- try:\n- # Will raise a RuntimeError if no XLA configuration is found\n- _ = xm.xla_device()\n- _tpu_available = True\n- except RuntimeError:\n- _tpu_available = False", "from_author": true }, { "body": "This needs to be `True` by default, and `False` when we don't want to check for the device (before launching multiprocessing for instance).", "diff_hunk": "@@ -56,8 +51,15 @@ def is_apex_available():\n return importlib.util.find_spec(\"apex\") is not None\n \n \n-def is_tpu_available():\n- \"Checks if `torch_xla` is installed and if a TPU is in the environment\"\n+def is_tpu_available(check_device=False):", "from_author": false }, { "body": "It needs to be `False` by default, because otherwise it also does this on import checks that are scattered around the library.", "diff_hunk": "@@ -56,8 +51,15 @@ def is_apex_available():\n return importlib.util.find_spec(\"apex\") is not None\n \n \n-def is_tpu_available():\n- \"Checks if `torch_xla` is installed and if a TPU is in the environment\"\n+def is_tpu_available(check_device=False):", "from_author": true }, { "body": "Or made into a separate function if we don't want `False` behavior. (I know we're not fans of that, but this is one case where it should be `False`)", "diff_hunk": "@@ -56,8 +51,15 @@ def is_apex_available():\n return importlib.util.find_spec(\"apex\") is not None\n \n \n-def is_tpu_available():\n- \"Checks if `torch_xla` is installed and if a TPU is in the environment\"\n+def is_tpu_available(check_device=False):", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/469", "pr_id": 978424504 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 94516436b..e548a3940 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n from setuptools import find_packages\n \n extras = {}\n-extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n+extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\", \"hf-doc-builder >= 0.3.0\"]\n extras[\"docs\"] = []\n extras[\"test\"] = [\n \"pytest\",\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/468", "pr_id": 977496257 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 4b2dfac18..04571e50e 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -19,10 +19,10 @@\n title: Notebook Launcher\n - local: kwargs\n title: Kwargs Handlers\n- - local: internal\n- title: Internals\n - local: checkpoint\n title: Checkpointing\n+ - local: internal\n+ title: Internals\n - local: tracking\n title: Experiment Tracking\n - local: fsdp\n@@ -31,4 +31,6 @@\n title: Memory Utilities\n - local: deepspeed\n title: DeepSpeed\n+ - local: utilities\n+ title: General Utilities\n title: API Reference\ndiff --git a/docs/source/internal.mdx b/docs/source/internal.mdx\nindex 4f7f59500..e1eec36be 100644\n--- a/docs/source/internal.mdx\n+++ b/docs/source/internal.mdx\n@@ -44,34 +44,6 @@ The main work on your PyTorch `DataLoader` is done by the following function:\n \n [[autodoc]] state.AcceleratorState\n \n-### DistributedType\n-\n-[[autodoc]] state.DistributedType\n-\n ## 
Tracking\n \n [[autodoc]] tracking.GeneralTracker\n-\n-## Utilities\n-\n-[[autodoc]] utils.extract_model_from_parallel\n-\n-[[autodoc]] utils.is_bf16_available\n-\n-[[autodoc]] utils.is_torch_version\n-\n-[[autodoc]] utils.is_tpu_available\n-\n-[[autodoc]] utils.gather\n-\n-[[autodoc]] utils.send_to_device\n-\n-[[autodoc]] utils.set_seed\n-\n-[[autodoc]] utils.synchronize_rng_state\n-\n-[[autodoc]] utils.synchronize_rng_states\n-\n-[[autodoc]] utils.wait_for_everyone\n-\n-[[autodoc]] utils.write_basic_config\ndiff --git a/docs/source/utilities.mdx b/docs/source/utilities.mdx\nnew file mode 100644\nindex 000000000..f5756a262\n--- /dev/null\n+++ b/docs/source/utilities.mdx\n@@ -0,0 +1,91 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Helpful Utilities\n+\n+Below are a variety of utility functions that Accelerate provides, broken down by use-case. \n+\n+## Data Classes\n+\n+These are basic dataclasses used throughout Accelerate and can be passed in as parameters.\n+\n+[[autodoc]] utils.DistributedType\n+\n+[[autodoc]] utils.LoggerType\n+\n+[[autodoc]] utils.PrecisionType\n+\n+## Data Manipulation and Operations\n+\n+These include data operations that mimic the same `torch` ops but can be used on distributed processes.\n+\n+[[autodoc]] utils.broadcast\n+\n+[[autodoc]] utils.concatenate\n+\n+[[autodoc]] utils.gather\n+\n+[[autodoc]] utils.pad_across_processes\n+\n+[[autodoc]] utils.reduce\n+\n+[[autodoc]] utils.send_to_device\n+\n+## Environment Checks\n+\n+These functionalities check the state of the current working environment including information about the operating system itself, what it can support, and if particular dependencies are installed. 
\n+\n+[[autodoc]] utils.get_max_memory\n+\n+[[autodoc]] utils.is_bf16_available\n+\n+[[autodoc]] utils.is_torch_version\n+\n+[[autodoc]] utils.is_tpu_available\n+\n+## Environment Configuration\n+\n+[[autodoc]] utils.write_basic_config\n+\n+When setting up Accelerate for the first time, rather than running `accelerate config` [~utils.write_basic_config] can be used as alternative for quick configuration.\n+\n+## Modeling\n+\n+These utilities relate to interacting with PyTorch models\n+\n+[[autodoc]] utils.extract_model_from_parallel\n+\n+[[autodoc]] utils.get_max_layer_size\n+\n+[[autodoc]] utils.offload_state_dict\n+\n+\n+## Parallel\n+\n+These include general utilities that should be used when working in parallel.\n+\n+[[autodoc]] utils.extract_model_from_parallel\n+\n+[[autodoc]] utils.save\n+\n+[[autodoc]] utils.wait_for_everyone\n+\n+\n+## Random\n+\n+These utilities relate to setting and synchronizing of all the random states.\n+\n+[[autodoc]] utils.set_seed\n+\n+[[autodoc]] utils.synchronize_rng_state\n+\n+[[autodoc]] utils.synchronize_rng_states\n\\ No newline at end of file\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 8b8e30304..3ccf2012b 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -179,6 +179,16 @@ def list(cls):\n \n \n class LoggerType(BaseEnum):\n+ \"\"\"Represents a type of supported experiment tracker\n+\n+ Values:\n+\n+ - **ALL** -- all available trackers in the environment that are supported\n+ - **TENSORBOARD** -- TensorBoard as an experiment tracker\n+ - **WANDB** -- wandb as an experiment tracker\n+ - **COMETML** -- comet_ml as an experiment tracker\n+ \"\"\"\n+\n ALL = \"all\"\n TENSORBOARD = \"tensorboard\"\n WANDB = \"wandb\"\n@@ -186,6 +196,15 @@ class LoggerType(BaseEnum):\n \n \n class PrecisionType(BaseEnum):\n+ \"\"\"Represents a type of precision used on floating point values\n+\n+ Values:\n+\n+ - **NO** -- using full precision (FP32)\n+ - **FP16** -- using half precision\n+ - **BF16** -- using brain floating point precision\n+ \"\"\"\n+\n NO = \"no\"\n FP16 = \"fp16\"\n BF16 = \"bf16\"\n", "code_comments": [ { "body": "Would be nice to add more content to this page introducing each section!", "diff_hunk": "@@ -0,0 +1,77 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Helpful Utilities", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/467", "pr_id": 977300990 }, { "diff": "diff --git a/docs/source/internal.mdx b/docs/source/internal.mdx\nindex 9b496b30e..4f7f59500 100644\n--- a/docs/source/internal.mdx\n+++ b/docs/source/internal.mdx\n@@ -56,6 +56,12 @@ The main work on your PyTorch `DataLoader` is done by the following function:\n \n [[autodoc]] utils.extract_model_from_parallel\n \n+[[autodoc]] utils.is_bf16_available\n+\n+[[autodoc]] utils.is_torch_version\n+\n+[[autodoc]] utils.is_tpu_available\n+\n [[autodoc]] utils.gather\n \n [[autodoc]] utils.send_to_device\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex d1d634355..594f7aa1c 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -57,6 +57,7 @@ def is_apex_available():\n \n \n def is_tpu_available():\n+ \"Checks if `torch_xla` is installed and if a TPU is in the environment\"\n return _tpu_available\n \n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/466", "pr_id": 977224429 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex c06d06ee9..3b8714911 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -16,18 +16,13 @@\n import os\n \n import torch\n+from torch.optim import AdamW\n from torch.utils.data import DataLoader\n \n import evaluate\n from accelerate import Accelerator, DistributedType\n from datasets import load_dataset\n-from transformers import (\n- AdamW,\n- AutoModelForSequenceClassification,\n- AutoTokenizer,\n- get_linear_schedule_with_warmup,\n- set_seed,\n-)\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n ########################################################################\n@@ -117,7 +112,6 @@ def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n@@ -155,7 +149,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n \n # Instantiate scheduler\n lr_scheduler = get_linear_schedule_with_warmup(\n@@ -297,7 +291,7 @@ def main():\n help=\"If the training should continue from a checkpoint folder.\",\n )\n args = parser.parse_args()\n- config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n \n \ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex 53d582c67..f1f54d78d 100644\n--- a/examples/by_feature/cross_validation.py\n+++ 
b/examples/by_feature/cross_validation.py\n@@ -17,6 +17,7 @@\n \n import numpy as np\n import torch\n+from torch.optim import AdamW\n from torch.utils.data import DataLoader\n \n import evaluate\n@@ -26,13 +27,7 @@\n # New Code #\n # We'll be using StratifiedKFold for this example\n from sklearn.model_selection import StratifiedKFold\n-from transformers import (\n- AdamW,\n- AutoModelForSequenceClassification,\n- AutoTokenizer,\n- get_linear_schedule_with_warmup,\n- set_seed,\n-)\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n ########################################################################\n@@ -141,7 +136,6 @@ def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n@@ -178,7 +172,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n \n # Instantiate scheduler\n lr_scheduler = get_linear_schedule_with_warmup(\n@@ -268,7 +262,7 @@ def main():\n # New Code #\n parser.add_argument(\"--num_folds\", type=int, default=3, help=\"The number of splits to perform across the dataset\")\n args = parser.parse_args()\n- config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n \n \ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex 38da07aa1..d0a044bcd 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -16,18 +16,13 @@\n import os\n \n import torch\n+from torch.optim import AdamW\n from torch.utils.data import DataLoader\n \n import evaluate\n from accelerate import Accelerator, DistributedType\n from datasets import load_dataset\n-from transformers import (\n- AdamW,\n- AutoModelForSequenceClassification,\n- AutoTokenizer,\n- get_linear_schedule_with_warmup,\n- set_seed,\n-)\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n ########################################################################\n@@ -112,7 +107,6 @@ def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n # New Code #\n@@ -131,7 +125,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n \n # Instantiate scheduler\n lr_scheduler = get_linear_schedule_with_warmup(\n@@ -210,7 +204,7 @@ def main():\n )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n- config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 
16}\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n \n \ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex ac4b45866..7aef8e1ff 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -15,6 +15,7 @@\n import os\n \n import torch\n+from torch.optim import AdamW\n from torch.utils.data import DataLoader\n \n # New Code #\n@@ -22,13 +23,7 @@\n from accelerate import Accelerator, DistributedType\n from accelerate.utils import find_executable_batch_size\n from datasets import load_dataset\n-from transformers import (\n- AdamW,\n- AutoModelForSequenceClassification,\n- AutoTokenizer,\n- get_linear_schedule_with_warmup,\n- set_seed,\n-)\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n ########################################################################\n@@ -117,7 +112,6 @@ def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n@@ -139,7 +133,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n \n # New Code #\n # We now can define an inner training loop function. It should take a batch size as the only parameter,\n@@ -218,7 +212,7 @@ def main():\n )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n- config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n \n \ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex 0307d251f..788c54f50 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -16,18 +16,13 @@\n import os\n \n import torch\n+from torch.optim import AdamW\n from torch.utils.data import DataLoader\n \n import evaluate\n from accelerate import Accelerator, DistributedType\n from datasets import load_dataset\n-from transformers import (\n- AdamW,\n- AutoModelForSequenceClassification,\n- AutoTokenizer,\n- get_linear_schedule_with_warmup,\n- set_seed,\n-)\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n ########################################################################\n@@ -119,7 +114,6 @@ def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n@@ -142,7 +136,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n \n # Instantiate scheduler\n lr_scheduler = 
get_linear_schedule_with_warmup(\n@@ -216,7 +210,7 @@ def main():\n )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n- config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n \n \ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 4020b3382..3b7b4c8d4 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -16,18 +16,13 @@\n import os\n \n import torch\n+from torch.optim import AdamW\n from torch.utils.data import DataLoader\n \n import evaluate\n from accelerate import Accelerator, DistributedType\n from datasets import load_dataset\n-from transformers import (\n- AdamW,\n- AutoModelForSequenceClassification,\n- AutoTokenizer,\n- get_linear_schedule_with_warmup,\n- set_seed,\n-)\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n ########################################################################\n@@ -127,7 +122,6 @@ def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n set_seed(seed)\n@@ -150,7 +144,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n \n # Instantiate scheduler\n lr_scheduler = get_linear_schedule_with_warmup(\n@@ -260,7 +254,7 @@ def main():\n help=\"Location on where to store experiment tracking logs`\",\n )\n args = parser.parse_args()\n- config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n \n \ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 8544d1279..f13fdfd27 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -16,18 +16,13 @@\n import os\n \n import torch\n+from torch.optim import AdamW\n from torch.utils.data import DataLoader\n \n import evaluate\n from accelerate import Accelerator, DistributedType\n from datasets import load_dataset\n-from transformers import (\n- AdamW,\n- AutoModelForSequenceClassification,\n- AutoTokenizer,\n- get_linear_schedule_with_warmup,\n- set_seed,\n-)\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n ########################################################################\n@@ -76,7 +71,6 @@ def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n@@ -139,7 +133,7 @@ def collate_fn(examples):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ 
optimizer = AdamW(params=model.parameters(), lr=lr)\n \n # Instantiate scheduler\n lr_scheduler = get_linear_schedule_with_warmup(\n@@ -305,7 +299,7 @@ def main():\n help=\"Location on where to store experiment tracking logs`\",\n )\n args = parser.parse_args()\n- config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n \n \ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 84c24fd11..00713e886 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -15,18 +15,13 @@\n import argparse\n \n import torch\n+from torch.optim import AdamW\n from torch.utils.data import DataLoader\n \n import evaluate\n from accelerate import Accelerator, DistributedType\n from datasets import load_dataset\n-from transformers import (\n- AdamW,\n- AutoModelForSequenceClassification,\n- AutoTokenizer,\n- get_linear_schedule_with_warmup,\n- set_seed,\n-)\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n ########################################################################\n@@ -103,7 +98,6 @@ def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n@@ -126,7 +120,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr)\n \n # Instantiate scheduler\n lr_scheduler = get_linear_schedule_with_warmup(\n@@ -188,7 +182,7 @@ def main():\n )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n- config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n \n \ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 0329f8786..62c4f9761 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -78,7 +78,7 @@ def dtype_byte_size(dtype: torch.dtype):\n \"\"\"\n if dtype == torch.bool:\n return 1 / 8\n- bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ bit_search = re.search(r\"[^\\d](\\d+)$\", str(dtype))\n if bit_search is None:\n raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n bit_size = int(bit_search.groups()[0])\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nindex f2414dfe5..1e9d18c17 100644\n--- a/tests/test_utils.py\n+++ b/tests/test_utils.py\n@@ -23,7 +23,7 @@\n from accelerate.utils import convert_outputs_to_fp32, find_device, patch_environment, send_to_device\n \n \n-TestNamedTuple = namedtuple(\"TestNamedTuple\", \"a b c\")\n+ExampleNamedTuple = namedtuple(\"ExampleNamedTuple\", \"a b c\")\n \n \n class UtilsTester(unittest.TestCase):\n@@ -50,8 +50,8 @@ def test_send_to_device(self):\n self.assertTrue(torch.equal(result2[\"b\"][1].cpu(), tensor))\n self.assertEqual(result2[\"c\"], 1)\n \n- result3 = send_to_device(TestNamedTuple(a=tensor, b=[tensor, tensor], c=1), device)\n- 
self.assertIsInstance(result3, TestNamedTuple)\n+ result3 = send_to_device(ExampleNamedTuple(a=tensor, b=[tensor, tensor], c=1), device)\n+ self.assertIsInstance(result3, ExampleNamedTuple)\n self.assertTrue(torch.equal(result3.a.cpu(), tensor))\n self.assertIsInstance(result3.b, list)\n self.assertTrue(torch.equal(result3.b[0].cpu(), tensor))\n", "code_comments": [ { "body": "Switch to the torch `AdamW`? The HF ones as small bugs that can hurt larger trainings.", "diff_hunk": "@@ -155,7 +155,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias, no_deprecation_warning=True)", "from_author": false }, { "body": "We don't set `correct_bias=False`, so makes sense. Will update", "diff_hunk": "@@ -155,7 +155,7 @@ def training_function(config, args):\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n- optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias, no_deprecation_warning=True)", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/465", "pr_id": 977134393 }, { "diff": "diff --git a/.github/workflows/check_dependencies.yml b/.github/workflows/check_dependencies.yml\nindex c4818ba85..1e8a4e1bb 100644\n--- a/.github/workflows/check_dependencies.yml\n+++ b/.github/workflows/check_dependencies.yml\n@@ -1,4 +1,4 @@\n-name: Check for dependency modification\n+name: Trigger docker images and run slow tests\n \n on:\n push:\n@@ -37,4 +37,9 @@ jobs:\n needs: check-for-setup\n if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')\n uses: ./.github/workflows/build-docker-images.yml\n- secrets: inherit\n\\ No newline at end of file\n+ secrets: inherit\n+\n+ run-tests:\n+ needs: build-docker-containers\n+ if: always()\n+ uses: ./.github/workflows/on-merge.yml\n\\ No newline at end of file\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/on-merge.yml\nindex f8b9ccddb..b95474758 100644\n--- a/.github/workflows/on-merge.yml\n+++ b/.github/workflows/on-merge.yml\n@@ -1,10 +1,7 @@\n-name: Self-hosted runner (push to \"main\")\n+name: Self-hosted runner tests (push to \"main\")\n \n on:\n- workflow_run:\n- workflows: [\"Check for dependency modification\"]\n- branches: [\"main\"]\n- types: [completed]\n+ workflow_call:\n workflow_dispatch:\n \n env:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/464", "pr_id": 977093205 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex c4487f6b6..2d2a0bd56 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -254,7 +254,7 @@ def __init__(\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n elif self.state.mixed_precision == \"bf16\":\n self.native_amp = is_bf16_available(True)\n- if mixed_precision == \"bf16\" and not self.native_amp:\n+ if mixed_precision == \"bf16\" and not self.native_amp and not is_tpu_available():\n raise ValueError(err.format(mode=\"bf16\", 
requirement=\"PyTorch >= 1.10 and a supported device.\"))\n \n # Only on the GPU do we care about scaling the gradients\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 95b9eb30a..36b11be56 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -80,7 +80,9 @@ def __init__(\n if not getattr(self, \"initialized\", False):\n self.backend = None\n self.deepspeed_plugin = None\n- mixed_precision = mixed_precision.lower() if mixed_precision else None\n+ mixed_precision = (\n+ parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision.lower()\n+ )\n if not _from_accelerator:\n raise ValueError(\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/463", "pr_id": 977050761 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 7b6c54608..c4487f6b6 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -46,6 +46,7 @@\n extract_model_from_parallel,\n gather,\n get_pretty_name,\n+ is_bf16_available,\n is_deepspeed_available,\n is_torch_version,\n is_tpu_available,\n@@ -242,20 +243,24 @@ def __init__(\n # Mixed precision attributes\n self.scaler = None\n self.native_amp = False\n+ err = \"{mode} mixed precision requires {requirement}\"\n if self.state.mixed_precision == \"fp16\":\n self.native_amp = is_torch_version(\">=\", \"1.6\")\n if not self.native_amp:\n- raise ValueError(\"fp16 mixed precision requires PyTorch >= 1.6\")\n-\n+ raise ValueError(err.format(mode=\"fp16\", requirement=\"PyTorch >= 1.6\"))\n+ if not torch.cuda.is_available():\n+ raise ValueError(err.format(mode=\"fp16\", requirement=\"a GPU\"))\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n elif self.state.mixed_precision == \"bf16\":\n- self.native_amp = is_torch_version(\">=\", \"1.10\")\n+ self.native_amp = is_bf16_available(True)\n if mixed_precision == \"bf16\" and not self.native_amp:\n- raise ValueError(\"bf16 mixed precision requires PyTorch >= 1.10\")\n+ raise ValueError(err.format(mode=\"bf16\", requirement=\"PyTorch >= 1.10 and a supported device.\"))\n \n- kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n- self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n+ # Only on the GPU do we care about scaling the gradients\n+ if torch.cuda.is_available():\n+ kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n+ self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n \n # Internal references to the training objects\n self._optimizers = []\n@@ -528,8 +533,9 @@ def prepare_model(self, model):\n if self.native_amp:\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n- elif self.mixed_precision == \"bf16\":\n- model.forward = torch.cuda.amp.autocast(dtype=torch.bfloat16)(model.forward)\n+ elif self.mixed_precision == \"bf16\" and self.distributed_type != DistributedType.TPU:\n+ device_type = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n+ model.forward = torch.autocast(device_type=device_type, dtype=torch.bfloat16)(model.forward)\n else:\n model.forward = 
torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n@@ -1054,8 +1060,10 @@ def autocast(self):\n if self.native_amp:\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\n- elif self.mixed_precision == \"bf16\":\n- autocast_context = torch.cuda.amp.autocast(dtype=torch.bfloat16)\n+ elif self.mixed_precision == \"bf16\" and is_bf16_available():\n+ if self.distributed_type in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\n+ device_type = \"cpu\" if not torch.cuda.is_available() else \"cuda\"\n+ autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=device_type)\n else:\n autocast_context = torch.cuda.amp.autocast()\n \ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex 298db439b..8ddc9af34 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -50,6 +50,13 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n else:\n in_colab_or_kaggle = False\n \n+ try:\n+ mixed_precision = PrecisionType(mixed_precision.lower())\n+ except ValueError:\n+ raise ValueError(\n+ f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n+ )\n+\n if in_colab_or_kaggle:\n if os.environ.get(\"TPU_NAME\", None) is not None:\n # TPU launch\n@@ -72,7 +79,7 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n if torch.cuda.is_available():\n print(\"Launching training on one GPU.\")\n else:\n- print(\"Launching training on CPU.\")\n+ print(\"Launching training on one CPU.\")\n function(*args)\n \n else:\n@@ -105,13 +112,6 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n \"function.\"\n )\n \n- try:\n- mixed_precision = PrecisionType(mixed_precision.lower())\n- except ValueError:\n- raise ValueError(\n- f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n- )\n-\n if use_fp16:\n warnings.warn('use_fp16=True is deprecated. 
Use mixed_precision=\"fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 06e9cb8fa..95b9eb30a 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -92,9 +92,9 @@ def __init__(\n self.process_index = xm.get_ordinal()\n self.local_process_index = xm.get_local_ordinal()\n self.device = xm.xla_device()\n- self.mixed_precision = (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n+ if mixed_precision == \"bf16\":\n+ os.environ[\"XLA_USE_BF16\"] = str(1)\n+ self.mixed_precision = mixed_precision\n elif os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\n assert (\n is_deepspeed_available()\n@@ -120,9 +120,7 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.mixed_precision = (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n+ self.mixed_precision = mixed_precision\n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n self.distributed_type = DistributedType.FSDP\n if self.mixed_precision != \"no\":\n@@ -166,15 +164,13 @@ def __init__(\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = local_rank\n self.device = torch.device(\"cpu\")\n- self.mixed_precision = \"no\"\n+ self.mixed_precision = mixed_precision\n else:\n self.distributed_type = DistributedType.NO\n self.num_processes = 1\n self.process_index = self.local_process_index = 0\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\n- self.mixed_precision = (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n+ self.mixed_precision = mixed_precision\n self.initialized = True\n \n def __repr__(self):\ndiff --git a/src/accelerate/test_utils/scripts/test_script.py b/src/accelerate/test_utils/scripts/test_script.py\nindex 018564fa0..07d794b42 100644\n--- a/src/accelerate/test_utils/scripts/test_script.py\n+++ b/src/accelerate/test_utils/scripts/test_script.py\n@@ -21,7 +21,14 @@\n from accelerate.data_loader import prepare_data_loader\n from accelerate.state import AcceleratorState\n from accelerate.test_utils import RegressionDataset, RegressionModel, are_the_same_tensors\n-from accelerate.utils import DistributedType, gather, is_torch_version, set_seed, synchronize_rng_states\n+from accelerate.utils import (\n+ DistributedType,\n+ gather,\n+ is_bf16_available,\n+ is_torch_version,\n+ set_seed,\n+ synchronize_rng_states,\n+)\n \n \n def init_state_check():\n@@ -245,74 +252,77 @@ def training_check():\n \n accelerator.print(\"Training yielded the same results on one CPU or distributes setup with batch split.\")\n \n- # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n- print(\"FP16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(mixed_precision=\"fp16\")\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = 
model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n-\n- # TEST that previous fp16 flag still works\n- print(\"Legacy FP16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(fp16=True)\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n-\n- # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16\n- print(\"BF16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(mixed_precision=\"bf16\")\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+ if torch.cuda.is_available():\n+ # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n+ print(\"FP16 training check.\")\n+ AcceleratorState._reset_state()\n+ accelerator = Accelerator(mixed_precision=\"fp16\")\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n+ model = RegressionModel()\n+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n+\n+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n+ set_seed(42)\n+ generator.manual_seed(42)\n+ for _ in range(3):\n+ for batch in train_dl:\n+ model.zero_grad()\n+ output = model(batch[\"x\"])\n+ loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+\n+ model = accelerator.unwrap_model(model).cpu()\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+\n+ # TEST that previous fp16 flag still works\n+ print(\"Legacy FP16 training 
check.\")\n+ AcceleratorState._reset_state()\n+ accelerator = Accelerator(fp16=True)\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n+ model = RegressionModel()\n+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n+\n+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n+ set_seed(42)\n+ generator.manual_seed(42)\n+ for _ in range(3):\n+ for batch in train_dl:\n+ model.zero_grad()\n+ output = model(batch[\"x\"])\n+ loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+\n+ model = accelerator.unwrap_model(model).cpu()\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+\n+ # BF16 support is only for CPU + TPU, and some GPU\n+ if is_bf16_available():\n+ # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16\n+ print(\"BF16 training check.\")\n+ AcceleratorState._reset_state()\n+ accelerator = Accelerator(mixed_precision=\"bf16\")\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n+ model = RegressionModel()\n+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n+\n+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n+ set_seed(42)\n+ generator.manual_seed(42)\n+ for _ in range(3):\n+ for batch in train_dl:\n+ model.zero_grad()\n+ output = model(batch[\"x\"])\n+ loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+\n+ model = accelerator.unwrap_model(model).cpu()\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n \n def main():\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 309b49023..350efc899 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -20,6 +20,7 @@\n )\n from .imports import (\n is_apex_available,\n+ is_bf16_available,\n is_boto3_available,\n is_ccl_available,\n is_comet_ml_available,\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex 0a44a093f..d1d634355 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -15,6 +15,10 @@\n import importlib\n import sys\n \n+import torch\n+\n+from .versions import is_torch_version\n+\n \n # The package importlib_metadata is in a different place, depending on the Python version.\n if sys.version_info < (3, 8):\n@@ -68,6 +72,17 @@ def is_deepspeed_available():\n return False\n \n \n+def is_bf16_available(ignore_tpu=False):\n+ \"Checks if bf16 is supported, optionally ignoring the TPU\"\n+ if is_tpu_available():\n+ return not ignore_tpu\n+ if is_torch_version(\">=\", \"1.10\"):\n+ if torch.cuda.is_available():\n+ return torch.cuda.is_bf16_supported()\n+ return True\n+ return False\n+\n+\n def is_transformers_available():\n return importlib.util.find_spec(\"transformers\") is not None\n \ndiff --git a/tests/test_hooks.py b/tests/test_hooks.py\nindex 51c434ab1..2e0519668 100644\n--- a/tests/test_hooks.py\n+++ b/tests/test_hooks.py\n@@ -77,20 +77,20 @@ def test_pre_forward_hook_is_executed(self):\n test_hook = PreForwardHook()\n 
add_hook_to_module(test_model, test_hook)\n output1 = test_model(x)\n- self.assertTrue(torch.allclose(output1, expected))\n+ self.assertTrue(torch.allclose(output1, expected, atol=1e-5))\n \n # Attaching a hook to a model when it already has one replaces, does not chain\n test_hook = PreForwardHook()\n add_hook_to_module(test_model, test_hook)\n output1 = test_model(x)\n- self.assertTrue(torch.allclose(output1, expected))\n+ self.assertTrue(torch.allclose(output1, expected, atol=1e-5))\n \n # You need to use the sequential hook to chain two or more hooks\n test_hook = SequentialHook(PreForwardHook(), PreForwardHook())\n add_hook_to_module(test_model, test_hook)\n \n output2 = test_model(x)\n- assert torch.allclose(output2, expected2)\n+ assert torch.allclose(output2, expected2, atol=1e-5)\n \n def test_post_forward_hook_is_executed(self):\n test_model = ModelForTest()\n@@ -100,20 +100,20 @@ def test_post_forward_hook_is_executed(self):\n test_hook = PostForwardHook()\n add_hook_to_module(test_model, test_hook)\n output1 = test_model(x)\n- self.assertTrue(torch.allclose(output1, output + 1))\n+ self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))\n \n # Attaching a hook to a model when it already has one replaces, does not chain\n test_hook = PostForwardHook()\n add_hook_to_module(test_model, test_hook)\n output1 = test_model(x)\n- self.assertTrue(torch.allclose(output1, output + 1))\n+ self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))\n \n # You need to use the sequential hook to chain two or more hooks\n test_hook = SequentialHook(PostForwardHook(), PostForwardHook())\n add_hook_to_module(test_model, test_hook)\n \n output2 = test_model(x)\n- assert torch.allclose(output2, output + 2)\n+ assert torch.allclose(output2, output + 2, atol=1e-5)\n \n def test_no_grad_in_hook(self):\n test_model = ModelForTest()\n", "code_comments": [ { "body": "Need to be extra sure that this always exists for PyTorch version for which `is_bf16_available()`", "diff_hunk": "@@ -1054,8 +1060,10 @@ def autocast(self):\n if self.native_amp:\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\n- elif self.mixed_precision == \"bf16\":\n- autocast_context = torch.cuda.amp.autocast(dtype=torch.bfloat16)\n+ elif self.mixed_precision == \"bf16\" and is_bf16_available():\n+ if self.distributed_type in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\n+ device_type = \"cpu\" if not torch.cuda.is_available() else \"cuda\"\n+ autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=device_type)", "from_author": false }, { "body": "Not sure why the period is removed here, but I like them at the end of sentences :-p", "diff_hunk": "@@ -65,14 +72,14 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n num_processes = 8\n \n launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n- print(f\"Launching a training on {num_processes} TPU cores.\")\n+ print(f\"Launching a training on {num_processes} TPU cores\")", "from_author": false }, { "body": "Do we want to set anything if `mixed_precision != \"bf16\"`? I'd remove the else statement. 
But if you really want it, then use one line for such a small test.", "diff_hunk": "@@ -92,9 +92,11 @@ def __init__(\n self.process_index = xm.get_ordinal()\n self.local_process_index = xm.get_local_ordinal()\n self.device = xm.xla_device()\n- self.mixed_precision = (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n+ if mixed_precision == \"bf16\":\n+ os.environ[\"XLA_USE_BF16\"] = 1\n+ else:\n+ os.environ[\"XLA_USE_BF16\"] = 0", "from_author": false }, { "body": "Not sure we need to keep this. We have done a couple of releases since we deprecated it, so it's okay if we stop testing it IMO.", "diff_hunk": "@@ -245,74 +252,77 @@ def training_check():\n \n accelerator.print(\"Training yielded the same results on one CPU or distributes setup with batch split.\")\n \n- # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n- print(\"FP16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(mixed_precision=\"fp16\")\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n-\n- # TEST that previous fp16 flag still works\n- print(\"Legacy FP16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(fp16=True)\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n-\n- # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16\n- print(\"BF16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(mixed_precision=\"bf16\")\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- 
accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+ if torch.cuda.is_available():\n+ # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n+ print(\"FP16 training check.\")\n+ AcceleratorState._reset_state()\n+ accelerator = Accelerator(mixed_precision=\"fp16\")\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n+ model = RegressionModel()\n+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n+\n+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n+ set_seed(42)\n+ generator.manual_seed(42)\n+ for _ in range(3):\n+ for batch in train_dl:\n+ model.zero_grad()\n+ output = model(batch[\"x\"])\n+ loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+\n+ model = accelerator.unwrap_model(model).cpu()\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+\n+ # TEST that previous fp16 flag still works\n+ print(\"Legacy FP16 training check.\")", "from_author": false }, { "body": "That's from me messing with some things, put it back!", "diff_hunk": "@@ -65,14 +72,14 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n num_processes = 8\n \n launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n- print(f\"Launching a training on {num_processes} TPU cores.\")\n+ print(f\"Launching a training on {num_processes} TPU cores\")", "from_author": true }, { "body": "Thought about this for a while, and I'm just going to keep it as assigning if `==`, so users can still set the env var themselves if they're more comfortable/familiar with it. 
", "diff_hunk": "@@ -92,9 +92,11 @@ def __init__(\n self.process_index = xm.get_ordinal()\n self.local_process_index = xm.get_local_ordinal()\n self.device = xm.xla_device()\n- self.mixed_precision = (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n+ if mixed_precision == \"bf16\":\n+ os.environ[\"XLA_USE_BF16\"] = 1\n+ else:\n+ os.environ[\"XLA_USE_BF16\"] = 0", "from_author": true }, { "body": "Fixed by adding a torch check for >= 1.10", "diff_hunk": "@@ -1054,8 +1060,10 @@ def autocast(self):\n if self.native_amp:\n if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\n- elif self.mixed_precision == \"bf16\":\n- autocast_context = torch.cuda.amp.autocast(dtype=torch.bfloat16)\n+ elif self.mixed_precision == \"bf16\" and is_bf16_available():\n+ if self.distributed_type in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\n+ device_type = \"cpu\" if not torch.cuda.is_available() else \"cuda\"\n+ autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=device_type)", "from_author": true }, { "body": "I'd feel more comfortable dropping the test once we've removed entirely the legacy param (whenever that may be)", "diff_hunk": "@@ -245,74 +252,77 @@ def training_check():\n \n accelerator.print(\"Training yielded the same results on one CPU or distributes setup with batch split.\")\n \n- # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n- print(\"FP16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(mixed_precision=\"fp16\")\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n-\n- # TEST that previous fp16 flag still works\n- print(\"Legacy FP16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(fp16=True)\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n-\n- # Mostly a test that BF16 doesn't crash as the operation 
inside the model is not converted to BF16\n- print(\"BF16 training check.\")\n- AcceleratorState._reset_state()\n- accelerator = Accelerator(mixed_precision=\"bf16\")\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n- model = RegressionModel()\n- optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n-\n- train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n- set_seed(42)\n- generator.manual_seed(42)\n- for _ in range(3):\n- for batch in train_dl:\n- model.zero_grad()\n- output = model(batch[\"x\"])\n- loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n- accelerator.backward(loss)\n- optimizer.step()\n-\n- model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n- assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+ if torch.cuda.is_available():\n+ # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n+ print(\"FP16 training check.\")\n+ AcceleratorState._reset_state()\n+ accelerator = Accelerator(mixed_precision=\"fp16\")\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n+ model = RegressionModel()\n+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n+\n+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n+ set_seed(42)\n+ generator.manual_seed(42)\n+ for _ in range(3):\n+ for batch in train_dl:\n+ model.zero_grad()\n+ output = model(batch[\"x\"])\n+ loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+\n+ model = accelerator.unwrap_model(model).cpu()\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+\n+ # TEST that previous fp16 flag still works\n+ print(\"Legacy FP16 training check.\")", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/462", "pr_id": 975343546 }, { "diff": "diff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nindex 6e8b34bbc..750ff9d57 100644\n--- a/src/accelerate/utils/offload.py\n+++ b/src/accelerate/utils/offload.py\n@@ -25,8 +25,8 @@ def offload_weight(weight, weight_name, offload_folder, index=None):\n dtype = None\n # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16.\n if str(weight.dtype) == \"torch.bfloat16\":\n- # Need to convert to FP32 since NumPy does not handle bfloat16s.\n- weight = weight.float()\n+ # Need to reinterpret the underlined data as int16 since NumPy does not handle bfloat16s.\n+ weight = weight.view(torch.int16)\n dtype = \"bfloat16\"\n array = weight.numpy()\n tensor_file = os.path.join(offload_folder, f\"{weight_name}.dat\")\n@@ -50,8 +50,8 @@ def load_offloaded_weight(weight_file, weight_info):\n \n dtype = weight_info[\"dtype\"]\n if dtype == \"bfloat16\":\n- # NumPy does not support bfloat16 so this was saved as a float32\n- dtype = \"float32\"\n+ # NumPy does not support bfloat16 so this was saved as a int16\n+ dtype = \"int16\"\n \n weight = np.memmap(weight_file, dtype=dtype, shape=shape, 
mode=\"r\")\n \n@@ -59,7 +59,7 @@ def load_offloaded_weight(weight_file, weight_info):\n weight = weight[0]\n weight = torch.tensor(weight)\n if weight_info[\"dtype\"] == \"bfloat16\":\n- weight = weight.to(torch.bfloat16)\n+ weight = weight.view(torch.bfloat16)\n \n return weight\n \n", "code_comments": [], "context": [ { "body": "This PR handles a bug reported in https://github.com/huggingface/accelerate/issues/454 concerning bfloat16 and disk offload.", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/461", "pr_id": 975069155 }, { "diff": "diff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 60f39c90a..309b49023 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -48,6 +48,7 @@\n OffloadedWeightsLoader,\n PrefixedDataset,\n extract_submodules_state_dict,\n+ load_offloaded_weight,\n offload_state_dict,\n offload_weight,\n save_offload_index,\ndiff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nindex a562e9283..6e8b34bbc 100644\n--- a/src/accelerate/utils/offload.py\n+++ b/src/accelerate/utils/offload.py\n@@ -22,10 +22,18 @@\n \n \n def offload_weight(weight, weight_name, offload_folder, index=None):\n+ dtype = None\n+ # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16.\n+ if str(weight.dtype) == \"torch.bfloat16\":\n+ # Need to convert to FP32 since NumPy does not handle bfloat16s.\n+ weight = weight.float()\n+ dtype = \"bfloat16\"\n array = weight.numpy()\n tensor_file = os.path.join(offload_folder, f\"{weight_name}.dat\")\n if index is not None:\n- index[weight_name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n+ if dtype is None:\n+ dtype = str(array.dtype)\n+ index[weight_name] = {\"dtype\": dtype, \"shape\": list(array.shape)}\n if array.ndim == 0:\n array = array[None]\n file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n@@ -34,6 +42,28 @@ def offload_weight(weight, weight_name, offload_folder, index=None):\n return index\n \n \n+def load_offloaded_weight(weight_file, weight_info):\n+ shape = tuple(weight_info[\"shape\"])\n+ if shape == ():\n+ # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor\n+ shape = (1,)\n+\n+ dtype = weight_info[\"dtype\"]\n+ if dtype == \"bfloat16\":\n+ # NumPy does not support bfloat16 so this was saved as a float32\n+ dtype = \"float32\"\n+\n+ weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode=\"r\")\n+\n+ if len(weight_info[\"shape\"]) == 0:\n+ weight = weight[0]\n+ weight = torch.tensor(weight)\n+ if weight_info[\"dtype\"] == \"bfloat16\":\n+ weight = weight.to(torch.bfloat16)\n+\n+ return weight\n+\n+\n def save_offload_index(index, offload_folder):\n if index is None or len(index) == 0:\n # Nothing to save\n@@ -129,12 +159,7 @@ def __getitem__(self, key: str):\n return self.state_dict[key]\n weight_info = self.index[key]\n weight_file = os.path.join(self.save_folder, f\"{key}.dat\")\n- shape = tuple(weight_info[\"shape\"])\n- if shape == ():\n- weight = np.memmap(weight_file, dtype=weight_info[\"dtype\"], shape=(1,), mode=\"r\")[0]\n- else:\n- weight = np.memmap(weight_file, dtype=weight_info[\"dtype\"], shape=shape, mode=\"r\")\n- return torch.tensor(weight)\n+ return load_offloaded_weight(weight_file, weight_info)\n \n def __iter__(self):\n return 
iter(self.all_keys)\ndiff --git a/tests/test_offload.py b/tests/test_offload.py\nindex 515d5094d..765a9685d 100644\n--- a/tests/test_offload.py\n+++ b/tests/test_offload.py\n@@ -19,7 +19,13 @@\n import torch\n import torch.nn as nn\n \n-from accelerate.utils import OffloadedWeightsLoader, offload_state_dict\n+from accelerate.utils import (\n+ OffloadedWeightsLoader,\n+ is_torch_version,\n+ load_offloaded_weight,\n+ offload_state_dict,\n+ offload_weight,\n+)\n \n \n class ModelForTest(nn.Module):\n@@ -35,8 +41,6 @@ def forward(self, x):\n \n class OffloadTester(unittest.TestCase):\n def test_offload_state_dict(self):\n- from tempfile import TemporaryDirectory\n-\n model = ModelForTest()\n with TemporaryDirectory() as tmp_dir:\n offload_state_dict(tmp_dir, model.state_dict())\n@@ -49,6 +53,22 @@ def test_offload_state_dict(self):\n self.assertTrue(os.path.isfile(weight_file))\n # TODO: add tests on the fact weights are properly loaded\n \n+ def test_offload_weight(self):\n+ dtypes = [torch.float16, torch.float32]\n+ if is_torch_version(\">=\", \"1.10\"):\n+ dtypes.append(torch.bfloat16)\n+\n+ for dtype in dtypes:\n+ weight = torch.randn(2, 3, dtype=dtype)\n+ with TemporaryDirectory() as tmp_dir:\n+ index = offload_weight(weight, \"weight\", tmp_dir, {})\n+ weight_file = os.path.join(tmp_dir, \"weight.dat\")\n+ self.assertTrue(os.path.isfile(weight_file))\n+ self.assertDictEqual(index, {\"weight\": {\"shape\": [2, 3], \"dtype\": str(dtype).split(\".\")[1]}})\n+\n+ new_weight = load_offloaded_weight(weight_file, index[\"weight\"])\n+ self.assertTrue(torch.equal(weight, new_weight))\n+\n def test_offload_weights_loader(self):\n model = ModelForTest()\n state_dict = model.state_dict()\n", "code_comments": [ { "body": "Since we care about the prevision here, would it be a good idea to test these with floats that marginally get close to the float16 reach to ensure they're lossless? 
\r\n\r\nE.g.:\r\n```python\r\nweight = torch.rand((2,3), dtype=dtype)\r\n```\r\n\r\n", "diff_hunk": "@@ -49,6 +53,22 @@ def test_offload_state_dict(self):\n self.assertTrue(os.path.isfile(weight_file))\n # TODO: add tests on the fact weights are properly loaded\n \n+ def test_offload_weight(self):\n+ dtypes = [torch.float16, torch.float32]\n+ if is_torch_version(\">=\", \"1.10\"):\n+ dtypes.append(torch.bfloat16)\n+\n+ for dtype in dtypes:\n+ weight = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dtype)", "from_author": false }, { "body": "Probably yes πŸ˜… ", "diff_hunk": "@@ -49,6 +53,22 @@ def test_offload_state_dict(self):\n self.assertTrue(os.path.isfile(weight_file))\n # TODO: add tests on the fact weights are properly loaded\n \n+ def test_offload_weight(self):\n+ dtypes = [torch.float16, torch.float32]\n+ if is_torch_version(\">=\", \"1.10\"):\n+ dtypes.append(torch.bfloat16)\n+\n+ for dtype in dtypes:\n+ weight = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dtype)", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/460", "pr_id": 974082132 }, { "diff": "diff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nindex bb8aba326..38da07aa1 100644\n--- a/examples/by_feature/gradient_accumulation.py\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -157,7 +157,7 @@ def training_function(config, args):\n # We use the new `no_sync` context manager to prevent gradient averaging\n # until we want to at the proper step if we happen to be in a distributed setup\n # otherwise it does nothing\n- if step % gradient_accumulation_steps == 0:\n+ if step % gradient_accumulation_steps != 0:\n # Accumulate gradients locally\n with accelerator.no_sync(model):\n output = model(**batch)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/459", "pr_id": 973991361 }, { "diff": "diff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex e341ad93f..06e9cb8fa 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -75,6 +75,7 @@ def __init__(\n self.__dict__ = self._shared_state\n if parse_flag_from_env(\"USE_CPU\"):\n cpu = True\n+ self._check_initialized(mixed_precision, cpu)\n self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\n if not getattr(self, \"initialized\", False):\n self.backend = None\n@@ -196,3 +197,17 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @staticmethod\n+ def _reset_state():\n+ \"Resets `_shared_state`, is used internally and should not be called\"\n+ AcceleratorState._shared_state = {}\n+\n+ def _check_initialized(self, mixed_precision=None, cpu=None):\n+ \"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized\"\n+ if getattr(self, \"initialized\", False):\n+ err = \"AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerate()`.\"\n+ if cpu and self.device.type != \"cpu\":\n+ raise ValueError(err.format(flag=\"cpu=True\"))\n+ if mixed_precision is not None and mixed_precision != self.mixed_precision:\n+ raise ValueError(err.format(flag=f\"mixed_precision='{mixed_precision}'\"))\ndiff 
--git a/src/accelerate/test_utils/scripts/test_script.py b/src/accelerate/test_utils/scripts/test_script.py\nindex 8ed869edc..018564fa0 100644\n--- a/src/accelerate/test_utils/scripts/test_script.py\n+++ b/src/accelerate/test_utils/scripts/test_script.py\n@@ -247,6 +247,7 @@ def training_check():\n \n # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n print(\"FP16 training check.\")\n+ AcceleratorState._reset_state()\n accelerator = Accelerator(mixed_precision=\"fp16\")\n train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n model = RegressionModel()\n@@ -269,6 +270,7 @@ def training_check():\n \n # TEST that previous fp16 flag still works\n print(\"Legacy FP16 training check.\")\n+ AcceleratorState._reset_state()\n accelerator = Accelerator(fp16=True)\n train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n model = RegressionModel()\n@@ -291,6 +293,7 @@ def training_check():\n \n # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16\n print(\"BF16 training check.\")\n+ AcceleratorState._reset_state()\n accelerator = Accelerator(mixed_precision=\"bf16\")\n train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n model = RegressionModel()\n", "code_comments": [ { "body": "I don't understand why we are using this class, when you can just do a regular string with flag between curly brackets and then use the format method.", "diff_hunk": "@@ -196,3 +198,14 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ def _check_initialized(self, mixed_precision=None, cpu=None):\n+ \"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized\"\n+ if getattr(self, \"initialized\", False):\n+ template = Template(\n+ \"AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `$flag` to `Accelerate()`.\"\n+ )", "from_author": false }, { "body": "Definitely not because I forgot strings could do this... 
πŸ‘€ ", "diff_hunk": "@@ -196,3 +198,14 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ def _check_initialized(self, mixed_precision=None, cpu=None):\n+ \"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized\"\n+ if getattr(self, \"initialized\", False):\n+ template = Template(\n+ \"AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `$flag` to `Accelerate()`.\"\n+ )", "from_author": true }, { "body": "Should be a `staticmethod` no?", "diff_hunk": "@@ -196,3 +197,17 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @classmethod", "from_author": false }, { "body": "And here you should change the value of `AcceleratorState._shared_state`", "diff_hunk": "@@ -196,3 +197,17 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @classmethod\n+ def _reset_state(cls):\n+ \"Resets `_shared_state`, is used internally and should not be called\"\n+ cls._shared_state = {}", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/458", "pr_id": 973944450 }, { "diff": "diff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex c8cce9587..0a44a093f 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -34,7 +34,12 @@\n try:\n import torch_xla.core.xla_model as xm # noqa: F401\n \n- _tpu_available = True\n+ try:\n+ # Will raise a RuntimeError if no XLA configuration is found\n+ _ = xm.xla_device()\n+ _tpu_available = True\n+ except RuntimeError:\n+ _tpu_available = False\n except ImportError:\n _tpu_available = False\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/456", "pr_id": 973736883 }, { "diff": "diff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex b043d5d20..8b41c48f8 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -116,7 +116,7 @@ def load_accelerator_state(input_dir, models, optimizers, schedulers, process_in\n Args:\n input_dir (`str` or `os.PathLike`):\n The name of the folder to load all relevant weights and states.\n- model_stmodelsates (`List[torch.nn.Module]`):\n+ models (`List[torch.nn.Module]`):\n A list of model instances\n optimizers (`List[torch.optim.Optimizer]`):\n A list of optimizer instances\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/447", "pr_id": 968449920 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 45c4ae912..93916bd9f 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,6 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n extras[\"docs\"] = []\n extras[\"test\"] = [\n- \"psutil\",\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n@@ -60,7 +59,7 @@\n ]\n },\n python_requires=\">=3.7.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", 
\"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\n extras_require=extras,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n", "code_comments": [], "context": [ { "body": "Thanks! LG2M!", "from_author": false }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/445", "pr_id": 968284995 }, { "diff": "diff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nindex 9db015427..4014e3972 100644\n--- a/src/accelerate/big_modeling.py\n+++ b/src/accelerate/big_modeling.py\n@@ -88,6 +88,7 @@ def cpu_offload(\n execution_device: Optional[torch.device] = None,\n offload_buffers: bool = False,\n state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ preload_module_classes: Optional[List[str]] = None,\n ):\n \"\"\"\n Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n@@ -104,13 +105,23 @@ def cpu_offload(\n Whether or not to offload the buffers with the model parameters.\n state_dict (`Dict[str, torch.Tensor]`, *optional*):\n The state dict of the model that will be kept on CPU.\n+ preload_module_classes (`List[str]`, *optional*):\n+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n+ of the forward. This should only be used for classes that have submodules which are registered but not\n+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n if execution_device is None:\n execution_device = next(iter(model.parameters())).device\n if state_dict is None:\n state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n attach_align_device_hook(\n- model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=state_dict,\n+ preload_module_classes=preload_module_classes,\n )\n add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n return model\n@@ -121,6 +132,7 @@ def disk_offload(\n offload_dir: Union[str, os.PathLike],\n execution_device: Optional[torch.device] = None,\n offload_buffers: bool = False,\n+ preload_module_classes: Optional[List[str]] = None,\n ):\n \"\"\"\n Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n@@ -136,6 +148,11 @@ def disk_offload(\n model's first parameter device.\n offload_buffers (`bool`, *optional*, defaults to `False`):\n Whether or not to offload the buffers with the model parameters.\n+ preload_module_classes (`List[str]`, *optional*):\n+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n+ of the forward. 
This should only be used for classes that have submodules which are registered but not\n+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n offload_state_dict(offload_dir, model.state_dict())\n@@ -148,6 +165,7 @@ def disk_offload(\n offload=True,\n offload_buffers=offload_buffers,\n weights_map=weights_map,\n+ preload_module_classes=preload_module_classes,\n )\n add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n return model\n@@ -160,6 +178,7 @@ def dispatch_model(\n state_dict: Optional[Dict[str, torch.Tensor]] = None,\n offload_dir: Union[str, os.PathLike] = None,\n offload_buffers: bool = False,\n+ preload_module_classes: Optional[List[str]] = None,\n ):\n \"\"\"\n Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n@@ -180,6 +199,11 @@ def dispatch_model(\n The folder in which to offload the model weights (or where the model weights are already offloaded).\n offload_buffers (`bool`, *optional*, defaults to `False`):\n Whether or not to offload the buffers with the model parameters.\n+ preload_module_classes (`List[str]`, *optional*):\n+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n+ of the forward. This should only be used for classes that have submodules which are registered but not\n+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n # Error early if the device map is incomplete.\n check_device_map(model, device_map)\n@@ -219,6 +243,7 @@ def dispatch_model(\n offload=offload,\n offload_buffers=offload_buffers,\n weights_map=weights_map,\n+ preload_module_classes=preload_module_classes,\n )\n model.hf_device_map = device_map\n return model\n@@ -234,6 +259,7 @@ def load_checkpoint_and_dispatch(\n offload_buffers: bool = False,\n dtype: Optional[Union[str, torch.dtype]] = None,\n offload_state_dict: bool = False,\n+ preload_module_classes: Optional[List[str]] = None,\n ):\n \"\"\"\n Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n@@ -267,6 +293,11 @@ def load_checkpoint_and_dispatch(\n offload_state_dict (`bool`, *optional*, defaults to `False`):\n If `True`, will temporarily offload the CPU state dict on the hard drive to avoig getting out of CPU RAM if\n the weight of the CPU state dict + the biggest shard does not fit.\n+ preload_module_classes (`List[str]`, *optional*):\n+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n+ of the forward. 
This should only be used for classes that have submodules which are registered but not\n+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n if device_map == \"auto\":\n device_map = infer_auto_device_map(\n@@ -282,4 +313,10 @@ def load_checkpoint_and_dispatch(\n )\n if device_map is None:\n return model\n- return dispatch_model(model, device_map=device_map, offload_dir=offload_folder, offload_buffers=offload_buffers)\n+ return dispatch_model(\n+ model,\n+ device_map=device_map,\n+ offload_dir=offload_folder,\n+ offload_buffers=offload_buffers,\n+ preload_module_classes=preload_module_classes,\n+ )\ndiff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\nindex dea10ba50..5c04b8621 100644\n--- a/src/accelerate/hooks.py\n+++ b/src/accelerate/hooks.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n import functools\n-from typing import Dict, Mapping, Optional, Union\n+from typing import Dict, List, Mapping, Optional, Union\n \n import torch\n import torch.nn as nn\n@@ -220,17 +220,23 @@ def init_hook(self, module):\n for name, _ in named_module_tensors(module, recurse=self.place_submodules):\n set_module_tensor_to_device(module, name, self.execution_device)\n elif self.offload:\n- self.original_devices = {name: param.device for name, param in named_module_tensors(module)}\n+ self.original_devices = {\n+ name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules)\n+ }\n if self.weights_map is None:\n self.weights_map = {\n name: param.to(\"cpu\")\n- for name, param in named_module_tensors(module, include_buffers=self.offload_buffers)\n+ for name, param in named_module_tensors(\n+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules\n+ )\n }\n \n- for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ for name, _ in named_module_tensors(\n+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules\n+ ):\n set_module_tensor_to_device(module, name, \"meta\")\n if not self.offload_buffers and self.execution_device is not None:\n- for name, _ in module.named_buffers(recurse=False):\n+ for name, _ in module.named_buffers(recurse=self.place_submodules):\n set_module_tensor_to_device(module, name, self.execution_device)\n return module\n \n@@ -238,14 +244,18 @@ def pre_forward(self, module, *args, **kwargs):\n if self.io_same_device:\n self.input_device = find_device([args, kwargs])\n if self.offload:\n- for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ for name, _ in named_module_tensors(\n+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules\n+ ):\n set_module_tensor_to_device(module, name, self.execution_device, value=self.weights_map[name])\n \n return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)\n \n def post_forward(self, module, output):\n if self.offload:\n- for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ for name, _ in named_module_tensors(\n+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules\n+ ):\n set_module_tensor_to_device(module, name, \"meta\")\n \n if self.io_same_device and self.input_device is not None:\n@@ -260,7 +270,11 @@ def detach_hook(self, module):\n set_module_tensor_to_device(module, name, device, 
value=self.weights_map.get(name, None))\n \n \n-def attach_execution_device_hook(module: torch.nn.Module, execution_device: Union[int, str, torch.device]):\n+def attach_execution_device_hook(\n+ module: torch.nn.Module,\n+ execution_device: Union[int, str, torch.device],\n+ preload_module_classes: Optional[List[str]] = None,\n+):\n \"\"\"\n Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right\n execution device\n@@ -270,10 +284,19 @@ def attach_execution_device_hook(module: torch.nn.Module, execution_device: Unio\n The module where we want to attach the hooks.\n execution_device (`int`, `str` or `torch.device`):\n The device on which inputs and model weights should be placed before the forward pass.\n+ preload_module_classes (`List[str]`, *optional*):\n+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n+ of the forward. This should only be used for classes that have submodules which are registered but not\n+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n if not hasattr(module, \"_hf_hook\") and len(module.state_dict()) > 0:\n add_hook_to_module(module, AlignDevicesHook(execution_device))\n \n+ # Break the recursion if we get to a preload module.\n+ if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:\n+ return\n+\n for child in module.children():\n attach_execution_device_hook(child, execution_device)\n \n@@ -285,6 +308,7 @@ def attach_align_device_hook(\n weights_map: Optional[Mapping] = None,\n offload_buffers: bool = False,\n module_name: str = \"\",\n+ preload_module_classes: Optional[List[str]] = None,\n ):\n \"\"\"\n Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or\n@@ -303,10 +327,19 @@ def attach_align_device_hook(\n Whether or not to include the associated module's buffers when offloading.\n module_name (`str`, *optional*, defaults to `\"\"`):\n The name of the module.\n+ preload_module_classes (`List[str]`, *optional*):\n+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n+ of the forward. 
This should only be used for classes that have submodules which are registered but not\n+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n # Attach the hook on this module if it has any direct tensor.\n directs = named_module_tensors(module)\n- if len(list(directs)) > 0:\n+ full_offload = (\n+ offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes\n+ )\n+\n+ if len(list(directs)) > 0 or full_offload:\n if weights_map is not None:\n prefix = f\"{module_name}.\" if len(module_name) > 0 else \"\"\n prefixed_weights_map = PrefixedDataset(weights_map, prefix)\n@@ -317,9 +350,14 @@ def attach_align_device_hook(\n offload=offload,\n weights_map=prefixed_weights_map,\n offload_buffers=offload_buffers,\n+ place_submodules=full_offload,\n )\n add_hook_to_module(module, hook)\n \n+ # We stop the recursion in case we hit the full offload.\n+ if full_offload:\n+ return\n+\n # Recurse on all children of the module.\n for child_name, child in module.named_children():\n child_name = f\"{module_name}.{child_name}\" if len(module_name) > 0 else child_name\n@@ -330,6 +368,7 @@ def attach_align_device_hook(\n weights_map=weights_map,\n offload_buffers=offload_buffers,\n module_name=child_name,\n+ preload_module_classes=preload_module_classes,\n )\n \n \n@@ -352,6 +391,7 @@ def attach_align_device_hook_on_blocks(\n weights_map: Mapping = None,\n offload_buffers: bool = False,\n module_name: str = \"\",\n+ preload_module_classes: Optional[List[str]] = None,\n ):\n \"\"\"\n Attaches `AlignDevicesHook` to all blocks of a given model as needed.\n@@ -371,6 +411,11 @@ def attach_align_device_hook_on_blocks(\n Whether or not to include the associated module's buffers when offloading.\n module_name (`str`, *optional*, defaults to `\"\"`):\n The name of the module.\n+ preload_module_classes (`List[str]`, *optional*):\n+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n+ of the forward. 
This should only be used for classes that have submodules which are registered but not\n+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n # If one device and one offload, we've got one hook.\n if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):\n@@ -389,7 +434,7 @@ def attach_align_device_hook_on_blocks(\n return\n \n if not isinstance(execution_device, Mapping):\n- execution_device = {key: offload for key in offload.keys()}\n+ execution_device = {key: execution_device for key in offload.keys()}\n if not isinstance(offload, Mapping):\n offload = {key: offload for key in execution_device.keys()}\n \n@@ -410,11 +455,14 @@ def attach_align_device_hook_on_blocks(\n weights_map=weights_map,\n offload_buffers=offload_buffers,\n module_name=module_name,\n+ preload_module_classes=preload_module_classes,\n )\n if not hasattr(module, \"_hf_hook\"):\n hook = AlignDevicesHook(execution_device=execution_device[module_name], io_same_device=(module_name == \"\"))\n add_hook_to_module(module, hook)\n- attach_execution_device_hook(module, execution_device[module_name])\n+ attach_execution_device_hook(\n+ module, execution_device[module_name], preload_module_classes=preload_module_classes\n+ )\n elif module_name == \"\":\n hook = AlignDevicesHook(io_same_device=True)\n add_hook_to_module(module, hook)\n@@ -428,4 +476,5 @@ def attach_align_device_hook_on_blocks(\n weights_map=weights_map,\n offload_buffers=offload_buffers,\n module_name=child_name,\n+ preload_module_classes=preload_module_classes,\n )\ndiff --git a/tests/test_big_modeling.py b/tests/test_big_modeling.py\nindex 852b10c3b..4f3054b73 100644\n--- a/tests/test_big_modeling.py\n+++ b/tests/test_big_modeling.py\n@@ -56,6 +56,29 @@ def forward(self, x):\n return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n \n \n+# To test preload_module_classes\n+class ModuleWithUnusedSubModules(nn.Module):\n+ def __init__(self, input_dim, output_dim):\n+ super().__init__()\n+ self.linear = nn.Linear(input_dim, output_dim)\n+\n+ def forward(self, x):\n+ return x @ self.linear.weight.t() + self.linear.bias\n+\n+\n+class ModelWithUnusedSubModulesForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = ModuleWithUnusedSubModules(3, 4)\n+ self.linear2 = ModuleWithUnusedSubModules(4, 5)\n+ self.batchnorm = nn.BatchNorm1d(5)\n+ self.linear3 = ModuleWithUnusedSubModules(5, 6)\n+ self.linear4 = ModuleWithUnusedSubModules(6, 5)\n+\n+ def forward(self, x):\n+ return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n+\n+\n class BigModelingTester(unittest.TestCase):\n def test_init_empty_weights(self):\n # base use\n@@ -107,6 +130,33 @@ def test_cpu_offload(self):\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n \n+ def test_cpu_offload_with_unused_submodules(self):\n+ model = ModelWithUnusedSubModulesForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ cpu_offload(model, execution_device=device, preload_module_classes=[\"ModuleWithUnusedSubModules\"])\n+ output = model(x)\n+ self.assertTrue(\n+ torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n+ )\n+\n+ # Clean up for next test.\n+ 
remove_hook_from_submodules(model)\n+\n+ cpu_offload(\n+ model,\n+ execution_device=device,\n+ offload_buffers=True,\n+ preload_module_classes=[\"ModuleWithUnusedSubModules\"],\n+ )\n+ output = model(x)\n+ self.assertTrue(\n+ torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n+ )\n+\n @slow\n @require_cuda\n def test_cpu_offload_gpt2(self):\n@@ -145,6 +195,38 @@ def test_disk_offload(self):\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n \n+ def test_disk_offload_with_unused_submodules(self):\n+ model = ModelWithUnusedSubModulesForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(\n+ model, tmp_dir, execution_device=device, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n+ )\n+ output = model(x)\n+ self.assertTrue(\n+ torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n+ )\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(\n+ model,\n+ tmp_dir,\n+ execution_device=device,\n+ offload_buffers=True,\n+ preload_module_classes=[\"ModuleWithUnusedSubModules\"],\n+ )\n+ output = model(x)\n+ self.assertTrue(\n+ torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n+ )\n+\n @slow\n @require_cuda\n def test_disk_offload_gpt2(self):\n@@ -237,6 +319,36 @@ def test_dispatch_model_gpt2_on_two_gpus(self):\n \"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n )\n \n+ @require_cuda\n+ def test_dispatch_model_with_unused_submodules(self):\n+ model = ModelWithUnusedSubModulesForTest()\n+ device_map = {\"linear1\": \"cpu\", \"linear2\": \"disk\", \"batchnorm\": \"cpu\", \"linear3\": 0, \"linear4\": 0}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ dispatch_model(\n+ model, device_map, offload_dir=tmp_dir, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n+ )\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n+ @require_multi_gpu\n+ def test_dispatch_model_with_unused_submodules_multi_gpu(self):\n+ model = ModelWithUnusedSubModulesForTest()\n+ device_map = {\"linear1\": \"cpu\", \"linear2\": \"disk\", \"batchnorm\": \"cpu\", \"linear3\": 0, \"linear4\": 1}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ dispatch_model(\n+ model, device_map, offload_dir=tmp_dir, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n+ )\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n @require_cuda\n def test_load_checkpoint_and_dispatch(self):\n model = ModelForTest()\n@@ -282,3 +394,55 @@ def test_load_checkpoint_and_dispatch_multi_gpu(self):\n \n output = new_model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n+ @require_cuda\n+ def test_load_checkpoint_and_dispatch_with_unused_submodules(self):\n+ model = ModelWithUnusedSubModulesForTest()\n+ device_map = {\"linear1\": \"cpu\", \"linear2\": \"cpu\", \"batchnorm\": 0, \"linear3\": 0, \"linear4\": 0}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n+ 
torch.save(model.state_dict(), checkpoint)\n+\n+ new_model = ModelWithUnusedSubModulesForTest()\n+ new_model = load_checkpoint_and_dispatch(\n+ new_model, checkpoint, device_map=device_map, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n+ )\n+\n+ # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n+ self.assertEqual(new_model.linear1.linear.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(new_model.linear2.linear.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0))\n+ self.assertEqual(new_model.linear4.linear.weight.device, torch.device(0))\n+\n+ output = new_model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n+ @require_multi_gpu\n+ def test_load_checkpoint_and_dispatch_multi_gpu_with_unused_submodules(self):\n+ model = ModelWithUnusedSubModulesForTest()\n+ device_map = {\"linear1\": \"cpu\", \"linear2\": \"cpu\", \"batchnorm\": 0, \"linear3\": 0, \"linear4\": 1}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), checkpoint)\n+\n+ new_model = ModelWithUnusedSubModulesForTest()\n+ new_model = load_checkpoint_and_dispatch(\n+ new_model, checkpoint, device_map=device_map, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n+ )\n+\n+ # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n+ self.assertEqual(new_model.linear1.linear.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(new_model.linear2.linear.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0))\n+ self.assertEqual(new_model.linear4.linear.weight.device, torch.device(1))\n+\n+ output = new_model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n", "code_comments": [ { "body": "```suggestion\r\n preloaded_modules (`List[str]`, *optional*):\r\n```\r\n\r\nWhat about something such as `preloaded_modules`?", "diff_hunk": "@@ -371,6 +411,11 @@ def attach_align_device_hook_on_blocks(\n Whether or not to include the associated module's buffers when offloading.\n module_name (`str`, *optional*, defaults to `\"\"`):\n The name of the module.\n+ load_all_weights_classes (`List[str]`, *optional*):", "from_author": false }, { "body": "Maybe `preloaded_module_classes` to be similar to `no_split_module_classes`? 
I like `preloaded` a lot, thanks for the suggestion!", "diff_hunk": "@@ -371,6 +411,11 @@ def attach_align_device_hook_on_blocks(\n Whether or not to include the associated module's buffers when offloading.\n module_name (`str`, *optional*, defaults to `\"\"`):\n The name of the module.\n+ load_all_weights_classes (`List[str]`, *optional*):", "from_author": true }, { "body": "Works with me!", "diff_hunk": "@@ -371,6 +411,11 @@ def attach_align_device_hook_on_blocks(\n Whether or not to include the associated module's buffers when offloading.\n module_name (`str`, *optional*, defaults to `\"\"`):\n The name of the module.\n+ load_all_weights_classes (`List[str]`, *optional*):", "from_author": false }, { "body": "```suggestion\r\n # Break the recursion if we get to a preload module.\r\n```", "diff_hunk": "@@ -270,10 +284,19 @@ def attach_execution_device_hook(module: torch.nn.Module, execution_device: Unio\n The module where we want to attach the hooks.\n execution_device (`int`, `str` or `torch.device`):\n The device on which inputs and model weights should be placed before the forward pass.\n+ preload_module_classes (`List[str]`, *optional*):\n+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning\n+ of the forward. This should only be used for classes that have submodules which are registered but not\n+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,\n+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.\n \"\"\"\n if not hasattr(module, \"_hf_hook\") and len(module.state_dict()) > 0:\n add_hook_to_module(module, AlignDevicesHook(execution_device))\n \n+ # Break the recursion if we get to a load all weights module.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/442", "pr_id": 966129738 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 407b90184..0ead08c81 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -49,6 +49,7 @@\n get_pretty_name,\n is_deepspeed_available,\n is_torch_version,\n+ is_tpu_available,\n is_transformers_available,\n pad_across_processes,\n reduce,\n@@ -68,6 +69,9 @@\n DummyScheduler,\n )\n \n+if is_tpu_available():\n+ import torch_xla.distributed.xla_multiprocessing as xmp\n+\n logger = get_logger(__name__)\n \n \n@@ -546,6 +550,8 @@ def prepare_model(self, model):\n else:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n+ if self.distributed_type == DistributedType.TPU and self.state.fork_launched:\n+ model = xmp.MpModelWrapper(model).to(self.device)\n return model\n \n def _prepare_deepspeed(self, *args):\n@@ -712,7 +718,7 @@ def prepare_data_loader(self, data_loader):\n num_processes=self.num_processes,\n process_index=self.process_index,\n split_batches=self.split_batches,\n- put_on_device=self.device_placement,\n+ put_on_device=self.device_placement if self.distributed_type != DistributedType.TPU else False,\n rng_types=self.rng_types.copy(),\n dispatch_batches=self.dispatch_batches,\n )\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 79ba6b3f3..eb26c3a60 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -35,7 +35,7 @@\n \n 
\n if is_tpu_available():\n- import torch_xla.core.xla_model as xm\n+ import torch_xla.distributed.parallel_loader as xpl\n \n \n # kwargs of the DataLoader in min version 1.4.0.\n@@ -298,10 +298,7 @@ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwarg\n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.generator)\n- state = AcceleratorState()\n for batch in super().__iter__():\n- if state.distributed_type == DistributedType.TPU:\n- xm.mark_step()\n yield batch if self.device is None else send_to_device(batch, self.device)\n \n \n@@ -408,9 +405,6 @@ def __iter__(self):\n batch_size += 1\n \n data_slice = slice(state.process_index * batch_size, (state.process_index + 1) * batch_size)\n-\n- if state.distributed_type == DistributedType.TPU:\n- xm.mark_step()\n yield slice_tensors(batch, data_slice)\n \n def __len__(self):\n@@ -565,15 +559,19 @@ def prepare_data_loader(\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\n \n if dispatch_batches:\n- return DataLoaderDispatcher(\n+ dataloader = DataLoaderDispatcher(\n new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, **kwargs\n )\n+ else:\n+ dataloader = DataLoaderShard(\n+ new_dataset,\n+ device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n+ batch_sampler=new_batch_sampler,\n+ rng_types=rng_types,\n+ generator=generator,\n+ **kwargs,\n+ )\n \n- return DataLoaderShard(\n- new_dataset,\n- device=device if put_on_device else None,\n- batch_sampler=new_batch_sampler,\n- rng_types=rng_types,\n- generator=generator,\n- **kwargs,\n- )\n+ if state.distributed_type == DistributedType.TPU:\n+ return xpl.MpDeviceLoader(dataloader, device)\n+ return dataloader\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 6e09fdae4..891b0762b 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -75,6 +75,7 @@ def __init__(\n self.__dict__ = self._shared_state\n if parse_flag_from_env(\"USE_CPU\"):\n cpu = True\n+ self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\n if not getattr(self, \"initialized\", False):\n self.backend = None\n self.deepspeed_plugin = None\n@@ -90,7 +91,9 @@ def __init__(\n self.process_index = xm.get_ordinal()\n self.local_process_index = xm.get_local_ordinal()\n self.device = xm.xla_device()\n- self.mixed_precision = \"no\"\n+ self.mixed_precision = (\n+ parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n+ )\n elif os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\n assert (\n is_deepspeed_available()\ndiff --git a/src/accelerate/test_utils/scripts/test_script.py b/src/accelerate/test_utils/scripts/test_script.py\nindex a23179bd7..8ed869edc 100644\n--- a/src/accelerate/test_utils/scripts/test_script.py\n+++ b/src/accelerate/test_utils/scripts/test_script.py\n@@ -326,7 +326,8 @@ def main():\n if state.local_process_index == 0:\n print(\"\\n**DataLoader integration test**\")\n dl_preparation_check()\n- central_dl_preparation_check()\n+ if state.distributed_type != DistributedType.TPU:\n+ central_dl_preparation_check()\n \n # Trainings are not exactly the same in DeepSpeed and CPU mode\n if state.distributed_type == DistributedType.DEEPSPEED:\n@@ -337,5 +338,10 @@ def main():\n training_check()\n \n \n+def _mp_fn(index):\n+ # For xla_spawn (TPUs)\n+ main()\n+\n+\n if __name__ == \"__main__\":\n main()\ndiff --git 
a/src/accelerate/test_utils/scripts/test_sync.py b/src/accelerate/test_utils/scripts/test_sync.py\nindex 2f5cc3722..b58a8c092 100644\n--- a/src/accelerate/test_utils/scripts/test_sync.py\n+++ b/src/accelerate/test_utils/scripts/test_sync.py\n@@ -126,5 +126,10 @@ def main():\n test_distributed_sync(accelerator)\n \n \n+def _mp_fn(index):\n+ # For xla_spawn (TPUs)\n+ main()\n+\n+\n if __name__ == \"__main__\":\n main()\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nindex d4f0042fb..c74185c5d 100644\n--- a/src/accelerate/utils/launch.py\n+++ b/src/accelerate/utils/launch.py\n@@ -68,4 +68,5 @@ def __call__(self, index, *args):\n os.environ[\"LOCAL_RANK\"] = str(index)\n os.environ[\"RANK\"] = str(index)\n \n+ os.environ[\"FORK_LAUNCHED\"] = str(1)\n self.launcher(*args)\ndiff --git a/tests/test_tpu.py b/tests/test_tpu.py\nindex d0b2f0b72..bffa8b8b4 100644\n--- a/tests/test_tpu.py\n+++ b/tests/test_tpu.py\n@@ -24,7 +24,7 @@\n class MultiTPUTester(unittest.TestCase):\n def setUp(self):\n mod_file = inspect.getfile(accelerate.test_utils)\n- self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"test_script.py\"])\n+ self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_script.py\"])\n self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])\n \n @require_tpu\ndiff --git a/tests/xla_spawn.py b/tests/xla_spawn.py\nindex c65878056..1a07af29c 100644\n--- a/tests/xla_spawn.py\n+++ b/tests/xla_spawn.py\n@@ -78,7 +78,6 @@ def main():\n \n # Patch sys.argv\n sys.argv = [args.training_script] + args.training_script_args + [\"--tpu_num_cores\", str(args.num_cores)]\n-\n xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)\n \n \n", "code_comments": [ { "body": "No, this shouldn't always be done IMO. 
This is an optimization for Colab/notebooks so we should only do it when we arrive at this code after a `notebook_launcher`.\r\n\r\nOr be ready to benchmark the scripts on TPU to show that this does not affect performance in any way, but usually optimizations for memory come at a cost for speed.", "diff_hunk": "@@ -546,6 +553,8 @@ def prepare_model(self, model):\n else:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n+ if self.distributed_type == DistributedType.TPU:\n+ model = xmp.MpModelWrapper(model).to(self.device)", "from_author": false }, { "body": "Same, would like to see some benchmarks on launching a script on a TPU VM to make sure this does not affect speed.", "diff_hunk": "@@ -565,15 +563,19 @@ def prepare_data_loader(\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\n \n if dispatch_batches:\n- return DataLoaderDispatcher(\n+ dataloader = DataLoaderDispatcher(\n new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, **kwargs\n )\n+ else:\n+ dataloader = DataLoaderShard(\n+ new_dataset,\n+ device=device if put_on_device else None,\n+ batch_sampler=new_batch_sampler,\n+ rng_types=rng_types,\n+ generator=generator,\n+ **kwargs,\n+ )\n \n- return DataLoaderShard(\n- new_dataset,\n- device=device if put_on_device else None,\n- batch_sampler=new_batch_sampler,\n- rng_types=rng_types,\n- generator=generator,\n- **kwargs,\n- )\n+ if state.distributed_type == DistributedType.TPU:\n+ return xpl.MpDeviceLoader(dataloader, device)", "from_author": false }, { "body": "Here's the docs on [MpModelWrapper](https://pytorch.org/xla/release/1.11/index.html#torch_xla.distributed.xla_multiprocessing.MpModelWrapper), but here's their comment on it:\r\n\r\n> This class should be used together with the spawn(…, start_method=’fork’) API to minimize the use of host memory. Instead of creating models on each multiprocessing process, hence replicating the model’s initial host memory, the model is created once at global scope, and then moved into each device inside the spawn() target function. This method has two advantages. First it uses only one copy of the memory pages to host the original model weights, and second it serializes the move of the wrapped model into each device, by lowering the load onto the system memory during the process.\r\n\r\nWill perform benchmarks today and tomorrow", "diff_hunk": "@@ -546,6 +553,8 @@ def prepare_model(self, model):\n else:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n+ if self.distributed_type == DistributedType.TPU:\n+ model = xmp.MpModelWrapper(model).to(self.device)", "from_author": true }, { "body": "Yes, and we don't use that spawn method when launching a script on TPU, hence why I don't think we should use it if we don't come from a colab/notebook here :-)", "diff_hunk": "@@ -546,6 +553,8 @@ def prepare_model(self, model):\n else:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n+ if self.distributed_type == DistributedType.TPU:\n+ model = xmp.MpModelWrapper(model).to(self.device)", "from_author": false }, { "body": "Makes sense! 
Will test and report back if there's a loss at all :) ", "diff_hunk": "@@ -546,6 +553,8 @@ def prepare_model(self, model):\n else:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n+ if self.distributed_type == DistributedType.TPU:\n+ model = xmp.MpModelWrapper(model).to(self.device)", "from_author": true }, { "body": "```suggestion\r\n self.fork_launched = parse_flag_from_env(\"FORK_LAUNCHED\", 0)\r\n```\r\nMight be clearer this way.", "diff_hunk": "@@ -75,6 +75,7 @@ def __init__(\n self.__dict__ = self._shared_state\n if parse_flag_from_env(\"USE_CPU\"):\n cpu = True\n+ self.use_fork = parse_flag_from_env(\"USE_LAUNCHER\", 0)", "from_author": false }, { "body": "Makes sense!", "diff_hunk": "@@ -75,6 +75,7 @@ def __init__(\n self.__dict__ = self._shared_state\n if parse_flag_from_env(\"USE_CPU\"):\n cpu = True\n+ self.use_fork = parse_flag_from_env(\"USE_LAUNCHER\", 0)", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks for fixing up the TPU support with best practices! Looks great to me! πŸ”₯ ", "from_author": false }, { "body": "@sgugger this PR also modifies the TPU selection in `AcceleratorState` to actually use mixed precision types.\r\n\r\n- [FP16 Support](https://github.com/pytorch/xla/issues/1936)\r\n- [BF16 Support](https://pytorch.org/xla/release/1.11/index.html#xla-tensors-and-bfloat16)", "from_author": true }, { "body": "We're passing πŸ˜„ \r\n![image](https://user-images.githubusercontent.com/7831895/173685310-82547d2a-2219-4236-b899-dd5b7599a9ef.png)\r\n", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/441", "pr_id": 964789954 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 8801011ec..407b90184 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -199,6 +199,10 @@ def __init__(\n ds_config.update({\"bf16\": {\"enabled\": True}})\n self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\n \n+ if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" or isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\n+ if is_torch_version(\"<\", \"1.12.0.dev20220418+cu113\"):\n+ raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")\n+\n if fsdp_plugin is None: # init from env variables\n fsdp_plugin = FullyShardedDataParallelPlugin() if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" else None\n else:\n@@ -206,10 +210,6 @@ def __init__(\n raise TypeError(\"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\")\n os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\n \n- if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n- if is_torch_version(\"<\", \"1.12.0.dev20220418+cu113\"):\n- raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")\n-\n # Kwargs handlers\n self.ddp_handler = None\n self.scaler_handler = None\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/437", "pr_id": 963675155 }, { "diff": "diff --git a/README.md b/README.md\nindex 44b25778e..c67d5dd50 100644\n--- a/README.md\n+++ b/README.md\n@@ -241,4 +241,5 @@ pip install accelerate\n - multi-GPU on several nodes (machines)\n - TPU\n - FP16 with native AMP (apex on 
the roadmap)\n-- DeepSpeed support (experimental)\n+- DeepSpeed support (Experimental)\n+- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 0ead08c81..7b6c54608 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -19,7 +19,6 @@\n import sys\n import warnings\n from contextlib import contextmanager\n-from copy import deepcopy\n from typing import List, Optional, Union\n \n import torch\n@@ -50,7 +49,6 @@\n is_deepspeed_available,\n is_torch_version,\n is_tpu_available,\n- is_transformers_available,\n pad_across_processes,\n reduce,\n save,\n@@ -183,25 +181,10 @@ def __init__(\n raise ImportError(\"DeepSpeed is not installed => run `pip install deepspeed` or build it from source.\")\n if compare_versions(\"deepspeed\", \"<\", \"0.6.5\"):\n raise ImportError(\"DeepSpeed version must be >= 0.6.5. Please update DeepSpeed.\")\n- if os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\" or deepspeed_plugin.zero3_init_flag:\n- if not is_transformers_available():\n- raise Exception(\n- \"When `zero3_init_flag` is set, it requires Transformers to be installed. \"\n- \"Please run `pip install transformers`.\"\n- )\n- from transformers.deepspeed import HfDeepSpeedConfig\n \n- ds_config = deepcopy(deepspeed_plugin.deepspeed_config)\n- del ds_config[\"train_batch_size\"]\n- ds_config.update({\"train_micro_batch_size_per_gpu\": 1, \"gradient_accumulation_steps\": 1})\n- mixed_precision = (\n- os.environ.get(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n- if mixed_precision == \"fp16\":\n- ds_config.update({\"fp16\": {\"enabled\": True}})\n- elif mixed_precision == \"bf16\":\n- ds_config.update({\"bf16\": {\"enabled\": True}})\n- self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\n+ mixed_precision = os.environ.get(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n+ deepspeed_plugin.set_mixed_precision(mixed_precision)\n+ deepspeed_plugin.set_deepspeed_weakref()\n \n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" or isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\n if is_torch_version(\"<\", \"1.12.0.dev20220418+cu113\"):\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 2b59d39f6..2bc6fbe4d 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -353,7 +353,7 @@ def deepspeed_launcher(args):\n current_env[\"USE_DEEPSPEED\"] = \"true\"\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n- current_env[\"GRADIENT_CLIPPING\"] = str(args.gradient_clipping)\n+ current_env[\"GRADIENT_CLIPPING\"] = str(args.gradient_clipping).lower()\n current_env[\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n current_env[\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n current_env[\"DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 891b0762b..e341ad93f 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -108,21 +108,6 @@ def __init__(\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.mixed_precision = \"no\" # deepspeed handles mixed_precision using deepspeed_config\n- mixed_precision 
= (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n- if (\n- mixed_precision == \"fp16\"\n- and \"fp16\" not in deepspeed_plugin.deepspeed_config\n- and \"bf16\" not in deepspeed_plugin.deepspeed_config\n- ):\n- deepspeed_plugin.deepspeed_config.update({\"fp16\": {\"enabled\": True}})\n- elif (\n- mixed_precision == \"bf16\"\n- and \"fp16\" not in deepspeed_plugin.deepspeed_config\n- and \"bf16\" not in deepspeed_plugin.deepspeed_config\n- ):\n- deepspeed_plugin.deepspeed_config.update({\"bf16\": {\"enabled\": True}})\n self.deepspeed_plugin = deepspeed_plugin\n elif int(os.environ.get(\"LOCAL_RANK\", -1)) != -1 and not cpu:\n self.distributed_type = DistributedType.MULTI_GPU\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex a3e193915..60f39c90a 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -83,6 +83,7 @@\n DeepSpeedSchedulerWrapper,\n DummyOptim,\n DummyScheduler,\n+ HfDeepSpeedConfig,\n )\n \n from .launch import PrepareForLaunch, get_launch_prefix\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex b731ca12a..8b8e30304 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -19,14 +19,12 @@\n import copy\n import enum\n import functools\n-import io\n-import json\n import os\n import typing\n import warnings\n from dataclasses import dataclass, field\n from datetime import timedelta\n-from typing import Callable, Iterable, Optional\n+from typing import Any, Callable, Iterable, Optional\n \n import torch\n \n@@ -215,7 +213,12 @@ class DeepSpeedPlugin:\n This plugin is used to integrate DeepSpeed.\n \"\"\"\n \n- config_file: str = field(default=None, metadata={\"help\": \"Path to the DeepSpeed config file.\"})\n+ hf_ds_config: Any = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`.\"\n+ },\n+ )\n gradient_accumulation_steps: int = field(\n default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n )\n@@ -249,17 +252,23 @@ class DeepSpeedPlugin:\n )\n \n def __post_init__(self):\n- if self.config_file is None:\n- self.config_file = os.environ.get(\"DEEPSPEED_CONFIG_FILE\", \"none\")\n- if self.config_file != \"none\":\n- with io.open(self.config_file, \"r\", encoding=\"utf-8\") as f:\n- self.deepspeed_config = json.load(f)\n- if \"gradient_accumulation_steps\" not in self.deepspeed_config:\n- self.deepspeed_config[\"gradient_accumulation_steps\"] = 1\n- elif self.deepspeed_config[\"gradient_accumulation_steps\"] == \"auto\":\n- raise ValueError(\"gradient_accumulation_steps cannot be set to 'auto' in the DeepSpeed config file.\")\n- if \"zero_optimization\" not in self.deepspeed_config:\n- raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config file.\")\n+ from .deepspeed import HfDeepSpeedConfig\n+\n+ if self.hf_ds_config is None:\n+ self.hf_ds_config = os.environ.get(\"DEEPSPEED_CONFIG_FILE\", \"none\")\n+ if (\n+ isinstance(self.hf_ds_config, dict)\n+ or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != \"none\")\n+ or isinstance(self.hf_ds_config, HfDeepSpeedConfig)\n+ ):\n+ if not isinstance(self.hf_ds_config, HfDeepSpeedConfig):\n+ self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)\n+ if \"gradient_accumulation_steps\" not in 
self.hf_ds_config.config:\n+ self.hf_ds_config.config[\"gradient_accumulation_steps\"] = 1\n+ elif self.hf_ds_config.config[\"gradient_accumulation_steps\"] == \"auto\":\n+ raise ValueError(\"gradient_accumulation_steps cannot be set to 'auto' in the DeepSpeed config.\")\n+ if \"zero_optimization\" not in self.hf_ds_config.config:\n+ raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config.\")\n else:\n if self.gradient_accumulation_steps is None:\n self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n@@ -281,8 +290,9 @@ def __post_init__(self):\n if self.zero3_save_16bit_model is None:\n self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n \n- self.deepspeed_config = {\n+ config = {\n \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n \"zero_optimization\": {\n \"stage\": self.zero_stage,\n@@ -296,29 +306,18 @@ def __post_init__(self):\n },\n }\n if self.gradient_clipping:\n- self.deepspeed_config[\"gradient_clipping\"] = self.gradient_clipping\n+ config[\"gradient_clipping\"] = self.gradient_clipping\n+ self.hf_ds_config = HfDeepSpeedConfig(config)\n+ self.deepspeed_config = self.hf_ds_config.config\n self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n if self.zero3_init_flag is None:\n self.zero3_init_flag = os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n- if self.zero3_init_flag and self.deepspeed_config[\"zero_optimization\"][\"stage\"] != 3:\n+ if self.zero3_init_flag and not self.hf_ds_config.is_zero3():\n warnings.warn(\"DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\")\n self.zero3_init_flag = False\n \n- def find_config_node(self, ds_key_long):\n- config = self.deepspeed_config\n-\n- # find the config node of interest if it exists\n- nodes = ds_key_long.split(\".\")\n- ds_key = nodes.pop()\n- for node in nodes:\n- config = config.get(node)\n- if config is None:\n- return None, ds_key\n-\n- return config, ds_key\n-\n def fill_match(self, ds_key_long, mismatches, must_match=True, **kwargs):\n- config, ds_key = self.find_config_node(ds_key_long)\n+ config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)\n if config is None:\n return\n \n@@ -360,6 +359,37 @@ def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must\n f\" values:\\n{mismatches_msg}\\nThe easiest method is to set these DeepSpeed config values to 'auto'.\"\n )\n \n+ def set_mixed_precision(self, mixed_precision):\n+ ds_config = self.deepspeed_config\n+ if mixed_precision == \"fp16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\n+ ds_config.update({\"fp16\": {\"enabled\": True}})\n+ elif mixed_precision == \"bf16\" and \"fp16\" not in ds_config and \"bf16\" not in ds_config:\n+ ds_config.update({\"bf16\": {\"enabled\": True}})\n+\n+ def set_deepspeed_weakref(self):\n+ from .imports import is_transformers_available\n+\n+ if self.zero3_init_flag:\n+ if not is_transformers_available():\n+ raise Exception(\n+ \"When `zero3_init_flag` is set, it requires Transformers to be installed. 
\"\n+ \"Please run `pip install transformers`.\"\n+ )\n+ ds_config = copy.deepcopy(self.deepspeed_config)\n+ if \"gradient_accumulation_steps\" not in ds_config or ds_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ ds_config[\"gradient_accumulation_steps\"] = 1\n+ if (\n+ \"train_micro_batch_size_per_gpu\" not in ds_config\n+ or ds_config[\"train_micro_batch_size_per_gpu\"] == \"auto\"\n+ ):\n+ ds_config[\"train_micro_batch_size_per_gpu\"] = 1\n+ if ds_config[\"train_batch_size\"] == \"auto\":\n+ del ds_config[\"train_batch_size\"]\n+\n+ from transformers.deepspeed import HfDeepSpeedConfig\n+\n+ self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\n+\n \n @dataclass\n class FullyShardedDataParallelPlugin:\ndiff --git a/src/accelerate/utils/deepspeed.py b/src/accelerate/utils/deepspeed.py\nindex dde1b9c63..02d1ab8bc 100644\n--- a/src/accelerate/utils/deepspeed.py\n+++ b/src/accelerate/utils/deepspeed.py\n@@ -12,9 +12,132 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from accelerate.scheduler import AcceleratedScheduler\n+import io\n+import json\n+from copy import deepcopy\n \n from ..optimizer import AcceleratedOptimizer\n+from ..scheduler import AcceleratedScheduler\n+\n+\n+class HfDeepSpeedConfig:\n+ \"\"\"\n+ This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.\n+\n+ A `weakref` of this object is stored in the module's globals to be able to access the config from areas where\n+ things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore\n+ it's important that this object remains alive while the program is still running.\n+\n+ [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration\n+ with values of [`TrainingArguments`] by replacing special placeholder values: `\"auto\"`. Without this special logic\n+ the DeepSpeed configuration is not modified in any way.\n+\n+ Args:\n+ config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.\n+\n+ \"\"\"\n+\n+ def __init__(self, config_file_or_dict):\n+\n+ if isinstance(config_file_or_dict, dict):\n+ # Don't modify user's data should they want to reuse it (e.g. 
in tests), because once we\n+ # modified it, it will not be accepted here again, since `auto` values would have been overridden\n+ config = deepcopy(config_file_or_dict)\n+ elif isinstance(config_file_or_dict, str):\n+ with io.open(config_file_or_dict, \"r\", encoding=\"utf-8\") as f:\n+ config = json.load(f)\n+ else:\n+ raise ValueError(\"expecting either a path to a DeepSpeed config file or a pre-populated dict\")\n+ self.config = config\n+\n+ # zero stage - this is done as early as possible, before model is created, to allow\n+ # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object\n+ # during ``zero.Init()`` which needs to know the dtype, and some other hparams.\n+ self._stage = self.get_value(\"zero_optimization.stage\", -1)\n+\n+ # offload\n+ self._offload = False\n+ if self.is_zero2() or self.is_zero3():\n+ offload_devices_valid = set([\"cpu\", \"nvme\"])\n+ offload_devices = set(\n+ [\n+ self.get_value(\"zero_optimization.offload_optimizer.device\"),\n+ self.get_value(\"zero_optimization.offload_param.device\"),\n+ ]\n+ )\n+ if len(offload_devices & offload_devices_valid) > 0:\n+ self._offload = True\n+\n+ def find_config_node(self, ds_key_long):\n+ config = self.config\n+\n+ # find the config node of interest if it exists\n+ nodes = ds_key_long.split(\".\")\n+ ds_key = nodes.pop()\n+ for node in nodes:\n+ config = config.get(node)\n+ if config is None:\n+ return None, ds_key\n+\n+ return config, ds_key\n+\n+ def get_value(self, ds_key_long, default=None):\n+ \"\"\"\n+ Returns the set value or `default` if no value is set\n+ \"\"\"\n+ config, ds_key = self.find_config_node(ds_key_long)\n+ if config is None:\n+ return default\n+ return config.get(ds_key, default)\n+\n+ def del_config_sub_tree(self, ds_key_long, must_exist=False):\n+ \"\"\"\n+ Deletes a sub-section of the config file if it's found.\n+\n+ Unless `must_exist` is `True` the section doesn't have to exist.\n+ \"\"\"\n+ config = self.config\n+\n+ # find the config node of interest if it exists\n+ nodes = ds_key_long.split(\".\")\n+ for node in nodes:\n+ parent_config = config\n+ config = config.get(node)\n+ if config is None:\n+ if must_exist:\n+ raise ValueError(f\"Can't find {ds_key_long} entry in the config: {self.config}\")\n+ else:\n+ return\n+\n+ # if found remove it\n+ if parent_config is not None:\n+ parent_config.pop(node)\n+\n+ def is_true(self, ds_key_long):\n+ \"\"\"\n+ Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very\n+ specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set).\n+\n+ \"\"\"\n+ value = self.get_value(ds_key_long)\n+ return False if value is None else bool(value)\n+\n+ def is_false(self, ds_key_long):\n+ \"\"\"\n+ Returns `True`/``False` only if the value is set, always `False` otherwise. 
So use this method to ask the very\n+ specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set).\n+ \"\"\"\n+ value = self.get_value(ds_key_long)\n+ return False if value is None else not bool(value)\n+\n+ def is_zero2(self):\n+ return self._stage == 2\n+\n+ def is_zero3(self):\n+ return self._stage == 3\n+\n+ def is_offload(self):\n+ return self._offload\n \n \n class DeepSpeedEngineWrapper:\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex 9ece7b0f7..12cca415c 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -40,7 +40,6 @@\n )\n from parameterized import parameterized\n from transformers import AutoModel, AutoModelForCausalLM, get_scheduler\n-from transformers.deepspeed import HfDeepSpeedConfig\n from transformers.testing_utils import mockenv_context\n from transformers.trainer_utils import set_seed\n from transformers.utils import is_torch_bf16_available\n@@ -153,7 +152,7 @@ def test_deepspeed_plugin(self, stage):\n deepspeed_plugin.deepspeed_config = None\n \n # Test config files are loaded correctly\n- deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[stage], zero3_init_flag=True)\n+ deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[stage], zero3_init_flag=True)\n if stage == ZERO2:\n self.assertFalse(deepspeed_plugin.zero3_init_flag)\n elif stage == ZERO3:\n@@ -165,7 +164,7 @@ def test_deepspeed_plugin(self, stage):\n del ds_config[\"gradient_accumulation_steps\"]\n with open(os.path.join(dirpath, \"ds_config.json\"), \"w\") as out_file:\n json.dump(ds_config, out_file)\n- deepspeed_plugin = DeepSpeedPlugin(config_file=os.path.join(dirpath, \"ds_config.json\"))\n+ deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(dirpath, \"ds_config.json\"))\n self.assertEqual(deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"], 1)\n deepspeed_plugin.deepspeed_config = None\n \n@@ -176,14 +175,14 @@ def test_deepspeed_plugin(self, stage):\n with open(os.path.join(dirpath, \"ds_config.json\"), \"w\") as out_file:\n json.dump(ds_config, out_file)\n with self.assertRaises(ValueError) as cm:\n- deepspeed_plugin = DeepSpeedPlugin(config_file=os.path.join(dirpath, \"ds_config.json\"))\n+ deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(dirpath, \"ds_config.json\"))\n self.assertTrue(\n- \"Please specify the ZeRO optimization config in the DeepSpeed config file.\" in str(cm.exception)\n+ \"Please specify the ZeRO optimization config in the DeepSpeed config.\" in str(cm.exception)\n )\n deepspeed_plugin.deepspeed_config = None\n \n # Test `deepspeed_config_process`\n- deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[stage])\n+ deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[stage])\n kwargs = {\n \"fp16.enabled\": True,\n \"bf16.enabled\": False,\n@@ -202,7 +201,7 @@ def test_deepspeed_plugin(self, stage):\n }\n deepspeed_plugin.deepspeed_config_process(**kwargs)\n for ds_key_long, value in kwargs.items():\n- config, ds_key = deepspeed_plugin.find_config_node(ds_key_long)\n+ config, ds_key = deepspeed_plugin.hf_ds_config.find_config_node(ds_key_long)\n if config.get(ds_key) is not None:\n self.assertEqual(config.get(ds_key), value)\n \n@@ -245,7 +244,7 @@ def test_accelerate_state_deepspeed(self, dtype):\n zero3_init_flag=True,\n )\n with mockenv_context(**self.dist_env):\n- state = AcceleratorState(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin, 
_from_accelerator=True)\n+ state = Accelerator(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin).state\n self.assertTrue(state.deepspeed_plugin.deepspeed_config[dtype][\"enabled\"])\n state.initialized = False\n \n@@ -262,8 +261,10 @@ def test_init_zero3(self):\n \n with mockenv_context(**self.dist_env):\n accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n- self.assertTrue(\"dschf\" in accelerator.__dict__)\n- self.assertTrue(type(accelerator.dschf) == HfDeepSpeedConfig)\n+ from transformers.deepspeed import is_deepspeed_zero3_enabled\n+\n+ self.assertTrue(is_deepspeed_zero3_enabled())\n+ accelerator.state.initialized = False\n \n @parameterized.expand(optim_scheduler_params, name_func=parameterized_custom_name_func)\n def test_prepare_deepspeed(self, optim_type, scheduler_type):\n@@ -301,7 +302,6 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n )\n with mockenv_context(**self.dist_env):\n accelerator = Accelerator(mixed_precision=\"fp16\", deepspeed_plugin=deepspeed_plugin)\n- self.assertEqual(accelerator.state.deepspeed_plugin.config_file, \"none\")\n \n train_set = RegressionDataset(length=80)\n eval_set = RegressionDataset(length=20)\n@@ -354,7 +354,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n \n elif optim_type == DS_OPTIMIZER and scheduler_type == DS_SCHEDULER:\n # Test DeepSpeed optimizer + DeepSpeed scheduler\n- deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[ZERO2])\n+ deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n train_set = RegressionDataset(length=80)\n@@ -414,7 +414,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n \n elif optim_type == CUSTOM_OPTIMIZER and scheduler_type == DS_SCHEDULER:\n # Test custom optimizer + DeepSpeed scheduler\n- deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[ZERO2])\n+ deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n train_set = RegressionDataset(length=80)\n@@ -447,7 +447,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n self.assertTrue(type(accelerator.deepspeed_engine_wrapped) == DeepSpeedEngineWrapper)\n elif optim_type == DS_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER:\n # Test deepspeed optimizer + custom scheduler\n- deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[ZERO2])\n+ deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])\n with mockenv_context(**self.dist_env):\n accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n train_set = RegressionDataset(length=80)\n@@ -483,7 +483,7 @@ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n \n def test_save_checkpoints(self):\n deepspeed_plugin = DeepSpeedPlugin(\n- config_file=self.ds_config_file[ZERO3],\n+ hf_ds_config=self.ds_config_file[ZERO3],\n zero3_init_flag=True,\n )\n del deepspeed_plugin.deepspeed_config[\"bf16\"]\n@@ -537,7 +537,7 @@ def test_save_checkpoints(self):\n \n def test_autofill_dsconfig(self):\n deepspeed_plugin = DeepSpeedPlugin(\n- config_file=self.ds_config_file[ZERO3],\n+ hf_ds_config=self.ds_config_file[ZERO3],\n zero3_init_flag=True,\n )\n del deepspeed_plugin.deepspeed_config[\"bf16\"]\n", "code_comments": [ { "body": "This is breaking. 
While the API was marked as experimental and we can do such a change, I'd like to understand why we can't keep the old argument name here.", "diff_hunk": "@@ -215,7 +213,12 @@ class DeepSpeedPlugin:\n This plugin is used to integrate DeepSpeed.\n \"\"\"\n \n- config_file: str = field(default=None, metadata={\"help\": \"Path to the DeepSpeed config file.\"})\n+ hf_ds_config: Any = field(", "from_author": false }, { "body": "Hello, this isn't breaking change as this feature was added in PR from couple days ago #405 . It isn't part of the previous release (0.9.0) and as such won't be a breaking change. The renaming was done to clearly explain that it isn't just limited to file and can accept either file, dict or `HFDeepSpeedConfig` Object.", "diff_hunk": "@@ -215,7 +213,12 @@ class DeepSpeedPlugin:\n This plugin is used to integrate DeepSpeed.\n \"\"\"\n \n- config_file: str = field(default=None, metadata={\"help\": \"Path to the DeepSpeed config file.\"})\n+ hf_ds_config: Any = field(", "from_author": true }, { "body": "Oh okay then, I thought it had been there for longer.", "diff_hunk": "@@ -215,7 +213,12 @@ class DeepSpeedPlugin:\n This plugin is used to integrate DeepSpeed.\n \"\"\"\n \n- config_file: str = field(default=None, metadata={\"help\": \"Path to the DeepSpeed config file.\"})\n+ hf_ds_config: Any = field(", "from_author": false }, { "body": "Let's check if the key exists before setting it a None?", "diff_hunk": "@@ -193,3 +194,47 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @staticmethod\n+ def is_deepspeed_zero3_enabled():\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ deepspeed_plugin = AcceleratorState._shared_state[\"deepspeed_plugin\"]\n+ return deepspeed_plugin.hf_ds_config.is_zero3() and deepspeed_plugin.zero3_init_flag\n+ return False\n+\n+ @staticmethod\n+ def get_deepspeed_config():\n+ ds_config = None\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ ds_config = deepcopy(AcceleratorState._shared_state[\"deepspeed_plugin\"].hf_ds_config.config)\n+ if ds_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ ds_config[\"gradient_accumulation_steps\"] = 1\n+ if (\n+ \"train_micro_batch_size_per_gpu\" not in ds_config\n+ or ds_config[\"train_micro_batch_size_per_gpu\"] == \"auto\"\n+ ):\n+ ds_config[\"train_micro_batch_size_per_gpu\"] = 1\n+ if ds_config[\"train_batch_size\"] == \"auto\":\n+ del ds_config[\"train_batch_size\"]\n+ return ds_config\n+\n+ @staticmethod\n+ def set_deepspeed_config(hf_ds_config):\n+ from .utils import DeepSpeedPlugin, HfDeepSpeedConfig\n+\n+ if isinstance(hf_ds_config, HfDeepSpeedConfig):\n+ AcceleratorState._shared_state[\"deepspeed_plugin\"] = DeepSpeedPlugin(\n+ hf_ds_config=hf_ds_config, zero3_init_flag=True\n+ )\n+ else:\n+ raise ValueError(\"`hf_ds_config` must be an instance of `accelerate.utils.HfDeepSpeedConfig`\")\n+\n+ @staticmethod\n+ def unset_deepspeed_config():\n+ AcceleratorState._shared_state[\"deepspeed_plugin\"] = None", "from_author": false }, { "body": "Let's error first if DeepSpeed is not available.", "diff_hunk": "@@ -193,3 +194,47 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @staticmethod\n+ def is_deepspeed_zero3_enabled():\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and 
AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ deepspeed_plugin = AcceleratorState._shared_state[\"deepspeed_plugin\"]\n+ return deepspeed_plugin.hf_ds_config.is_zero3() and deepspeed_plugin.zero3_init_flag\n+ return False\n+\n+ @staticmethod\n+ def get_deepspeed_config():\n+ ds_config = None\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ ds_config = deepcopy(AcceleratorState._shared_state[\"deepspeed_plugin\"].hf_ds_config.config)\n+ if ds_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ ds_config[\"gradient_accumulation_steps\"] = 1\n+ if (\n+ \"train_micro_batch_size_per_gpu\" not in ds_config\n+ or ds_config[\"train_micro_batch_size_per_gpu\"] == \"auto\"\n+ ):\n+ ds_config[\"train_micro_batch_size_per_gpu\"] = 1\n+ if ds_config[\"train_batch_size\"] == \"auto\":\n+ del ds_config[\"train_batch_size\"]\n+ return ds_config\n+\n+ @staticmethod\n+ def set_deepspeed_config(hf_ds_config):\n+ from .utils import DeepSpeedPlugin, HfDeepSpeedConfig", "from_author": false }, { "body": "setting deepspeed_plugin doesn't use any deepspeed functionality; Accelerate and Trainer check DeepSpeed availability at different places. Should we still check deepspeed availability here?", "diff_hunk": "@@ -193,3 +194,47 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @staticmethod\n+ def is_deepspeed_zero3_enabled():\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ deepspeed_plugin = AcceleratorState._shared_state[\"deepspeed_plugin\"]\n+ return deepspeed_plugin.hf_ds_config.is_zero3() and deepspeed_plugin.zero3_init_flag\n+ return False\n+\n+ @staticmethod\n+ def get_deepspeed_config():\n+ ds_config = None\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ ds_config = deepcopy(AcceleratorState._shared_state[\"deepspeed_plugin\"].hf_ds_config.config)\n+ if ds_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ ds_config[\"gradient_accumulation_steps\"] = 1\n+ if (\n+ \"train_micro_batch_size_per_gpu\" not in ds_config\n+ or ds_config[\"train_micro_batch_size_per_gpu\"] == \"auto\"\n+ ):\n+ ds_config[\"train_micro_batch_size_per_gpu\"] = 1\n+ if ds_config[\"train_batch_size\"] == \"auto\":\n+ del ds_config[\"train_batch_size\"]\n+ return ds_config\n+\n+ @staticmethod\n+ def set_deepspeed_config(hf_ds_config):\n+ from .utils import DeepSpeedPlugin, HfDeepSpeedConfig", "from_author": true }, { "body": "As we are directly setting the key in the dictionary, if it isn't present, it will be set to None which should not make a difference. I don't think it is required. 
", "diff_hunk": "@@ -193,3 +194,47 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @staticmethod\n+ def is_deepspeed_zero3_enabled():\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ deepspeed_plugin = AcceleratorState._shared_state[\"deepspeed_plugin\"]\n+ return deepspeed_plugin.hf_ds_config.is_zero3() and deepspeed_plugin.zero3_init_flag\n+ return False\n+\n+ @staticmethod\n+ def get_deepspeed_config():\n+ ds_config = None\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ ds_config = deepcopy(AcceleratorState._shared_state[\"deepspeed_plugin\"].hf_ds_config.config)\n+ if ds_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ ds_config[\"gradient_accumulation_steps\"] = 1\n+ if (\n+ \"train_micro_batch_size_per_gpu\" not in ds_config\n+ or ds_config[\"train_micro_batch_size_per_gpu\"] == \"auto\"\n+ ):\n+ ds_config[\"train_micro_batch_size_per_gpu\"] = 1\n+ if ds_config[\"train_batch_size\"] == \"auto\":\n+ del ds_config[\"train_batch_size\"]\n+ return ds_config\n+\n+ @staticmethod\n+ def set_deepspeed_config(hf_ds_config):\n+ from .utils import DeepSpeedPlugin, HfDeepSpeedConfig\n+\n+ if isinstance(hf_ds_config, HfDeepSpeedConfig):\n+ AcceleratorState._shared_state[\"deepspeed_plugin\"] = DeepSpeedPlugin(\n+ hf_ds_config=hf_ds_config, zero3_init_flag=True\n+ )\n+ else:\n+ raise ValueError(\"`hf_ds_config` must be an instance of `accelerate.utils.HfDeepSpeedConfig`\")\n+\n+ @staticmethod\n+ def unset_deepspeed_config():\n+ AcceleratorState._shared_state[\"deepspeed_plugin\"] = None", "from_author": true }, { "body": "Oh I thought there was some DeepSpeed import in that file. 
Okay to leave as is.", "diff_hunk": "@@ -193,3 +194,47 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @staticmethod\n+ def is_deepspeed_zero3_enabled():\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ deepspeed_plugin = AcceleratorState._shared_state[\"deepspeed_plugin\"]\n+ return deepspeed_plugin.hf_ds_config.is_zero3() and deepspeed_plugin.zero3_init_flag\n+ return False\n+\n+ @staticmethod\n+ def get_deepspeed_config():\n+ ds_config = None\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ ds_config = deepcopy(AcceleratorState._shared_state[\"deepspeed_plugin\"].hf_ds_config.config)\n+ if ds_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ ds_config[\"gradient_accumulation_steps\"] = 1\n+ if (\n+ \"train_micro_batch_size_per_gpu\" not in ds_config\n+ or ds_config[\"train_micro_batch_size_per_gpu\"] == \"auto\"\n+ ):\n+ ds_config[\"train_micro_batch_size_per_gpu\"] = 1\n+ if ds_config[\"train_batch_size\"] == \"auto\":\n+ del ds_config[\"train_batch_size\"]\n+ return ds_config\n+\n+ @staticmethod\n+ def set_deepspeed_config(hf_ds_config):\n+ from .utils import DeepSpeedPlugin, HfDeepSpeedConfig", "from_author": false }, { "body": "As long as you're sure it won't make any deepspeed related stuff break, fine by me.", "diff_hunk": "@@ -193,3 +194,47 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @staticmethod\n+ def is_deepspeed_zero3_enabled():\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ deepspeed_plugin = AcceleratorState._shared_state[\"deepspeed_plugin\"]\n+ return deepspeed_plugin.hf_ds_config.is_zero3() and deepspeed_plugin.zero3_init_flag\n+ return False\n+\n+ @staticmethod\n+ def get_deepspeed_config():\n+ ds_config = None\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ ds_config = deepcopy(AcceleratorState._shared_state[\"deepspeed_plugin\"].hf_ds_config.config)\n+ if ds_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ ds_config[\"gradient_accumulation_steps\"] = 1\n+ if (\n+ \"train_micro_batch_size_per_gpu\" not in ds_config\n+ or ds_config[\"train_micro_batch_size_per_gpu\"] == \"auto\"\n+ ):\n+ ds_config[\"train_micro_batch_size_per_gpu\"] = 1\n+ if ds_config[\"train_batch_size\"] == \"auto\":\n+ del ds_config[\"train_batch_size\"]\n+ return ds_config\n+\n+ @staticmethod\n+ def set_deepspeed_config(hf_ds_config):\n+ from .utils import DeepSpeedPlugin, HfDeepSpeedConfig\n+\n+ if isinstance(hf_ds_config, HfDeepSpeedConfig):\n+ AcceleratorState._shared_state[\"deepspeed_plugin\"] = DeepSpeedPlugin(\n+ hf_ds_config=hf_ds_config, zero3_init_flag=True\n+ )\n+ else:\n+ raise ValueError(\"`hf_ds_config` must be an instance of `accelerate.utils.HfDeepSpeedConfig`\")\n+\n+ @staticmethod\n+ def unset_deepspeed_config():\n+ AcceleratorState._shared_state[\"deepspeed_plugin\"] = None", "from_author": false }, { "body": "Reverted this change as per latest discussions on transformers PR", "diff_hunk": "@@ -193,3 +194,47 @@ def __repr__(self):\n @property\n def use_fp16(self):\n return self.mixed_precision != \"no\"\n+\n+ @staticmethod\n+ def is_deepspeed_zero3_enabled():\n+ if 
(\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ deepspeed_plugin = AcceleratorState._shared_state[\"deepspeed_plugin\"]\n+ return deepspeed_plugin.hf_ds_config.is_zero3() and deepspeed_plugin.zero3_init_flag\n+ return False\n+\n+ @staticmethod\n+ def get_deepspeed_config():\n+ ds_config = None\n+ if (\n+ \"deepspeed_plugin\" in AcceleratorState._shared_state\n+ and AcceleratorState._shared_state[\"deepspeed_plugin\"] is not None\n+ ):\n+ ds_config = deepcopy(AcceleratorState._shared_state[\"deepspeed_plugin\"].hf_ds_config.config)\n+ if ds_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ ds_config[\"gradient_accumulation_steps\"] = 1\n+ if (\n+ \"train_micro_batch_size_per_gpu\" not in ds_config\n+ or ds_config[\"train_micro_batch_size_per_gpu\"] == \"auto\"\n+ ):\n+ ds_config[\"train_micro_batch_size_per_gpu\"] = 1\n+ if ds_config[\"train_batch_size\"] == \"auto\":\n+ del ds_config[\"train_batch_size\"]\n+ return ds_config\n+\n+ @staticmethod\n+ def set_deepspeed_config(hf_ds_config):\n+ from .utils import DeepSpeedPlugin, HfDeepSpeedConfig", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/432", "pr_id": 962688452 }, { "diff": "diff --git a/examples/by_feature/README.md b/examples/by_feature/README.md\nindex f0db09937..689127a35 100644\n--- a/examples/by_feature/README.md\n+++ b/examples/by_feature/README.md\n@@ -42,6 +42,18 @@ These arguments should be added at the end of any method for starting the python\n accelerate launch ./checkpointing.py --checkpointing_steps epoch output_dir \"checkpointing_tutorial\" --resume_from_checkpoint \"checkpointing_tutorial/epoch_0\"\n ```\n \n+### Cross Validation (`cross_validation.py`)\n+\n+- Shows how to use `Accelerator.free_memory` and run cross validation efficiently with `datasets`.\n+- Arguments available:\n+ - `num_folds`, the number of folds the training dataset should be split into.\n+\n+These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:\n+\n+```bash\n+accelerate launch ./cross_validation.py --num_folds 2\n+```\n+\n ### Experiment Tracking (`tracking.py`)\n \n - Shows how to use `Accelerate.init_trackers` and `Accelerator.log`\n@@ -55,14 +67,14 @@ These arguments should be added at the end of any method for starting the python\n accelerate launch ./tracking.py --with_tracking\n ```\n \n-### Cross Validation (`cross_validation.py`)\n+### Gradient Accumulation (`gradient_accumulation.py`)\n \n-- Shows how to use `Accelerator.free_memory` and run cross validation efficiently with `datasets`.\n+- Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup.\n - Arguments available:\n- - `num_folds`, the number of folds the training dataset should be split into.\n+ - `gradient_accumulation_steps`, the number of steps to perform before the gradients are accumulated and the optimizer and scheduler are stepped + zero_grad\n \n These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:\n \n ```bash\n-accelerate launch ./cross_validation.py --num_folds 2\n-```\n+accelerate launch ./gradient_accumulation.py 
--gradient_accumulation_steps 5\n+```\n\\ No newline at end of file\ndiff --git a/examples/by_feature/gradient_accumulation.py b/examples/by_feature/gradient_accumulation.py\nnew file mode 100644\nindex 000000000..bb8aba326\n--- /dev/null\n+++ b/examples/by_feature/gradient_accumulation.py\n@@ -0,0 +1,218 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+import evaluate\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+# and perform gradient accumulation\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return 
tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+# For testing only\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ from accelerate.test_utils.training import mocked_dataloaders\n+\n+ get_dataloaders = mocked_dataloaders # noqa: F811\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+ # New Code #\n+ gradient_accumulation_steps = int(args.gradient_accumulation_steps)\n+\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n+\n+ set_seed(seed)\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ # New code #\n+ # We use the new `no_sync` context manager to prevent gradient averaging\n+ # until we want to at the proper step if we happen to be in a distributed setup\n+ # otherwise it does nothing\n+ if step % gradient_accumulation_steps == 0:\n+ # Accumulate gradients locally\n+ with accelerator.no_sync(model):\n+ output = model(**batch)\n+ loss = output.loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ else:\n+ # Sync gradients and step\n+ output = model(**batch)\n+ loss = output.loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could 
avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ # New Code #\n+ parser.add_argument(\n+ \"--gradient_accumulation_steps\",\n+ type=int,\n+ default=1,\n+ help=\"The number of minibatches to be ran before gradients are accumulated.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/test_utils/scripts/__init__.py b/src/accelerate/test_utils/scripts/__init__.py\nnew file mode 100644\nindex 000000000..e69de29bb\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 6ad4a3ca8..4b298d253 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -34,6 +34,7 @@\n \n EXCLUDE_EXAMPLES = [\n \"cross_validation.py\",\n+ \"gradient_accumulation.py\",\n \"multi_process_metrics.py\",\n \"memory.py\",\n \"fsdp_with_peak_mem_tracking.py\",\n@@ -215,3 +216,7 @@ def test_tracking(self):\n \"\"\".split()\n _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\n+\n+ def test_gradient_accumulation(self):\n+ testargs = [\"examples/by_feature/gradient_accumulation.py\"]\n+ _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/431", "pr_id": 961998080 }, { "diff": "diff --git a/src/accelerate/test_utils/scripts/__init__.py b/src/accelerate/test_utils/scripts/__init__.py\nnew file mode 100644\nindex 000000000..e69de29bb\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/430", "pr_id": 961995275 }, { "diff": "diff --git a/src/accelerate/test_utils/scripts/__init__.py b/src/accelerate/test_utils/scripts/__init__.py\nnew file mode 100644\nindex 000000000..e69de29bb\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/429", "pr_id": 961982271 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 9205f0854..8801011ec 100644\n--- 
a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import contextlib\n import gc\n import math\n import os\n@@ -355,6 +356,25 @@ def _goes_first(self, is_main):\n if is_main:\n self.wait_for_everyone()\n \n+ @contextmanager\n+ def no_sync(self, model):\n+ \"\"\"\n+ A context manager to disable gradient synchronizations across DDP processes by calling\n+ `torch.nn.parallel.DistributedDataParallel.no_sync`.\n+\n+ If `model` is not in DDP, this context manager does nothing\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ PyTorch Module that was prepared with `Accelerator.prepare`\n+ \"\"\"\n+ context = contextlib.nullcontext\n+ if self.num_processes > 1:\n+ context = getattr(model, \"no_sync\", context)\n+\n+ with context():\n+ yield\n+\n def print(self, *args, **kwargs):\n \"\"\"\n Use in replacement of `print()` to only print once per server.\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 93b04d6ab..2b59d39f6 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -31,10 +31,10 @@\n DistributedType,\n PrecisionType,\n PrepareForLaunch,\n+ get_launch_prefix,\n is_deepspeed_available,\n is_sagemaker_available,\n )\n-from accelerate.utils.versions import is_torch_version\n \n \n def launch_command_parser(subparsers=None):\n@@ -251,12 +251,7 @@ def simple_launcher(args):\n \n \n def multi_gpu_launcher(args):\n- if is_torch_version(\">=\", \"1.10.0\"):\n- cmd = [\"torchrun\"]\n- elif is_torch_version(\">=\", \"1.9.0\"):\n- cmd = [sys.executable, \"-m\", \"torch.distributed.run\"]\n- else:\n- cmd = [sys.executable, \"-m\", \"torch.distributed.launch\", \"--use_env\"]\n+ cmd = get_launch_prefix()\n if args.num_machines > 1:\n cmd.extend(\n [\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex e8dc99c8c..2d8527be2 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -8,7 +8,11 @@\n require_cpu,\n require_cuda,\n require_multi_gpu,\n+ require_single_gpu,\n require_tpu,\n slow,\n )\n from .training import RegressionDataset, RegressionModel\n+\n+\n+from .scripts import test_script, test_sync # isort:skip\ndiff --git a/src/accelerate/test_utils/test_script.py b/src/accelerate/test_utils/scripts/test_script.py\nsimilarity index 100%\nrename from src/accelerate/test_utils/test_script.py\nrename to src/accelerate/test_utils/scripts/test_script.py\ndiff --git a/src/accelerate/test_utils/scripts/test_sync.py b/src/accelerate/test_utils/scripts/test_sync.py\nnew file mode 100644\nindex 000000000..2f5cc3722\n--- /dev/null\n+++ b/src/accelerate/test_utils/scripts/test_sync.py\n@@ -0,0 +1,130 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from copy import deepcopy\n+\n+import torch\n+import torch.nn.functional as F\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator\n+from accelerate.test_utils import RegressionDataset, RegressionModel\n+from accelerate.utils import DistributedType, set_seed\n+\n+\n+def step_model(model, input, target, accelerator):\n+ model.train()\n+ output = model(input)\n+ loss = F.mse_loss(output, target.to(output.device))\n+ accelerator.backward(loss)\n+\n+\n+def get_training_setup(accelerator):\n+ \"Returns everything needed to perform basic training\"\n+ set_seed(42)\n+ model = RegressionModel()\n+ model.to(accelerator.device)\n+ dset = RegressionDataset()\n+ dataloader = DataLoader(dset, batch_size=16)\n+ # Make a copy of `model`\n+ ddp_model, dataloader = accelerator.prepare(deepcopy(model), dataloader)\n+ # Use a single batch for all of the tests\n+ ddp_input, ddp_target = next(iter(dataloader)).values()\n+ return model, ddp_model, ddp_input, ddp_target\n+\n+\n+def test_noop_sync(accelerator):\n+ # Test when on a single CPU or GPU that the context manager does nothing\n+ model, ddp_model, ddp_input, ddp_target = get_training_setup(accelerator)\n+ for iteration in range(3):\n+ # Gather the distributed inputs and targs for the base model\n+ input, target = accelerator.gather((ddp_input, ddp_target))\n+ input, target = input.to(accelerator.device), target.to(accelerator.device)\n+ # Perform our initial ground truth step in non \"DDP\"\n+ step_model(model, input, target, accelerator)\n+ # Do \"gradient accumulation\" (noop)\n+ if iteration % 2 == 0:\n+ # Accumulate grads locally\n+ with accelerator.no_sync(ddp_model):\n+ step_model(ddp_model, ddp_input, ddp_target, accelerator)\n+ else:\n+ # Sync grads\n+ step_model(ddp_model, ddp_input, ddp_target, accelerator)\n+\n+ # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync\n+ for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):\n+ if not param.requires_grad:\n+ continue\n+ assert torch.allclose(\n+ param.grad, ddp_param.grad\n+ ), f\"Gradients not in sync when they should be:\\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})\"\n+\n+ # Shuffle ddp_input on each iteration\n+ torch.manual_seed(1337 + iteration)\n+ ddp_input = ddp_input[torch.randperm(16)]\n+\n+\n+def test_distributed_sync(accelerator):\n+ # Test on distributed setup that context manager behaves properly\n+ model, ddp_model, ddp_input, ddp_target = get_training_setup(accelerator)\n+ for iteration in range(3):\n+ # Gather the distributed inputs and targs for the base model\n+ input, target = accelerator.gather((ddp_input, ddp_target))\n+ input, target = input.to(accelerator.device), target.to(accelerator.device)\n+ # Perform our initial ground truth step in non \"DDP\"\n+ step_model(model, input, target, accelerator)\n+ # Do \"gradient accumulation\" (noop)\n+ if iteration % 2 == 0:\n+ # Accumulate grads locally\n+ 
with accelerator.no_sync(ddp_model):\n+ step_model(ddp_model, ddp_input, ddp_target, accelerator)\n+ else:\n+ # Sync grads\n+ step_model(ddp_model, ddp_input, ddp_target, accelerator)\n+\n+ # DDP model and model should only be in sync when not (iteration % 2 == 0)\n+ for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):\n+ if not param.requires_grad:\n+ continue\n+ if iteration % 2 == 0:\n+ # Grads should not be in sync\n+ assert (\n+ torch.allclose(param.grad, ddp_param.grad) is False\n+ ), f\"Gradients in sync when they should not be:\\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})\"\n+ else:\n+ # Grads should be in sync\n+ assert (\n+ torch.allclose(param.grad, ddp_param.grad) is True\n+ ), f\"Gradients not in sync when they should be:\\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})\"\n+\n+ # Shuffle ddp_input on each iteration\n+ torch.manual_seed(1337 + iteration)\n+ ddp_input = ddp_input[torch.randperm(16)]\n+\n+\n+def main():\n+ accelerator = Accelerator()\n+ state = accelerator.state\n+ if state.distributed_type == DistributedType.NO:\n+ if state.local_process_index == 0:\n+ print(\"**NOOP `no_sync` gradient accumulation**\")\n+ test_noop_sync(accelerator)\n+ if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):\n+ if state.local_process_index == 0:\n+ print(\"**Distributed `no_sync` gradient accumulation**\")\n+ test_distributed_sync(accelerator)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 89d104a49..017f66d06 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -84,6 +84,14 @@ def require_tpu(test_case):\n return unittest.skipUnless(is_tpu_available(), \"test requires TPU\")(test_case)\n \n \n+def require_single_gpu(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there are no GPU\n+ available or number of GPUs is more than one.\n+ \"\"\"\n+ return unittest.skipUnless(torch.cuda.device_count() == 1, \"test requires a GPU\")(test_case)\n+\n+\n def require_multi_gpu(test_case):\n \"\"\"\n Decorator marking a test that requires a multi-GPU setup. 
These tests are skipped on a machine without multiple\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex b66b6fd1d..a3e193915 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -85,7 +85,7 @@\n DummyScheduler,\n )\n \n-from .launch import PrepareForLaunch\n+from .launch import PrepareForLaunch, get_launch_prefix\n from .memory import find_executable_batch_size\n from .other import (\n extract_model_from_parallel,\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nindex 8ff624250..d4f0042fb 100644\n--- a/src/accelerate/utils/launch.py\n+++ b/src/accelerate/utils/launch.py\n@@ -13,12 +13,28 @@\n # limitations under the License.\n \n import os\n+import sys\n \n import torch\n \n+from ..utils import is_torch_version\n from .dataclasses import DistributedType\n \n \n+def get_launch_prefix():\n+ \"\"\"\n+ Grabs the correct launcher for starting a distributed command, such as either `torchrun`, `python -m\n+ torch.distributed.run`, etc\n+ \"\"\"\n+ if is_torch_version(\">=\", \"1.10.0\"):\n+ cmd = [\"torchrun\"]\n+ elif is_torch_version(\">=\", \"1.9.0\"):\n+ cmd = [sys.executable, \"-m\", \"torch.distributed.run\"]\n+ else:\n+ cmd = [sys.executable, \"-m\", \"torch.distributed.launch\", \"--use_env\"]\n+ return cmd\n+\n+\n class PrepareForLaunch:\n \"\"\"\n Prepare a function that will launched in a distributed setup.\ndiff --git a/tests/test_grad_sync.py b/tests/test_grad_sync.py\nnew file mode 100644\nindex 000000000..c066bd41a\n--- /dev/null\n+++ b/tests/test_grad_sync.py\n@@ -0,0 +1,56 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import inspect\n+import os\n+import unittest\n+\n+import torch\n+\n+import accelerate\n+from accelerate import debug_launcher\n+from accelerate.test_utils import (\n+ execute_subprocess_async,\n+ require_cpu,\n+ require_multi_gpu,\n+ require_single_gpu,\n+ test_sync,\n+)\n+from accelerate.utils import get_launch_prefix, patch_environment\n+\n+\n+class SyncScheduler(unittest.TestCase):\n+ def setUp(self):\n+ mod_file = inspect.getfile(accelerate.test_utils)\n+ self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_sync.py\"])\n+\n+ @require_cpu\n+ def test_gradient_sync_single_cpu_noop(self):\n+ debug_launcher(test_sync.main)\n+ debug_launcher(test_sync.main, num_processes=1)\n+\n+ @require_cpu\n+ def test_gradient_sync_multi_cpu(self):\n+ debug_launcher(test_sync.main)\n+\n+ @require_single_gpu\n+ def test_gradient_sync_single_gpu(self):\n+ debug_launcher(test_sync.main, num_processes=1)\n+\n+ @require_multi_gpu\n+ def test_gradient_sync_multi_gpu(self):\n+ print(f\"Found {torch.cuda.device_count()} devices.\")\n+ cmd = get_launch_prefix() + [f\"--nproc_per_node={torch.cuda.device_count()}\", self.test_file_path]\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd, env=os.environ.copy())\ndiff --git 
a/tests/test_multigpu.py b/tests/test_multigpu.py\nindex 7ffbd94f8..2c0403ed5 100644\n--- a/tests/test_multigpu.py\n+++ b/tests/test_multigpu.py\n@@ -14,7 +14,6 @@\n \n import inspect\n import os\n-import sys\n import unittest\n \n import torch\n@@ -22,35 +21,26 @@\n import accelerate\n from accelerate import Accelerator\n from accelerate.test_utils import execute_subprocess_async, require_multi_gpu\n+from accelerate.utils import get_launch_prefix, patch_environment\n \n \n class MultiGPUTester(unittest.TestCase):\n def setUp(self):\n mod_file = inspect.getfile(accelerate.test_utils)\n- self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"test_script.py\"])\n+ self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + [\"scripts\", \"test_script.py\"])\n \n @require_multi_gpu\n def test_multi_gpu(self):\n print(f\"Found {torch.cuda.device_count()} devices.\")\n- distributed_args = f\"\"\"\n- -m torch.distributed.launch\n- --nproc_per_node={torch.cuda.device_count()}\n- --use_env\n- {self.test_file_path}\n- \"\"\".split()\n- cmd = [sys.executable] + distributed_args\n- execute_subprocess_async(cmd, env=os.environ.copy())\n+ cmd = get_launch_prefix() + [self.test_file_path]\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd, env=os.environ.copy())\n \n @require_multi_gpu\n def test_pad_across_processes(self):\n- distributed_args = f\"\"\"\n- -m torch.distributed.launch\n- --nproc_per_node={torch.cuda.device_count()}\n- --use_env\n- {inspect.getfile(self.__class__)}\n- \"\"\".split()\n- cmd = [sys.executable] + distributed_args\n- execute_subprocess_async(cmd, env=os.environ.copy())\n+ cmd = get_launch_prefix() + [inspect.getfile(self.__class__)]\n+ with patch_environment(omp_num_threads=1):\n+ execute_subprocess_async(cmd, env=os.environ.copy())\n \n \n if __name__ == \"__main__\":\n", "code_comments": [ { "body": "I would double check we are in a distributed state on top of this.", "diff_hunk": "@@ -355,6 +356,22 @@ def _goes_first(self, is_main):\n if is_main:\n self.wait_for_everyone()\n \n+ @contextmanager\n+ def no_sync(self, model):\n+ \"\"\"\n+ A context manager to disable gradient synchronizations across DDP processes by calling\n+ `torch.nn.parallel.DistributedDataParallel.no_sync`.\n+\n+ If `model` is not in DDP, this context manager does nothing\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ PyTorch Module that was prepared with `Accelerator.prepare`\n+ \"\"\"\n+ context = getattr(model, \"no_sync\", contextlib.nullcontext)", "from_author": false }, { "body": "This is not a core feature, I don't see why it should go in `accelerate test`, which is just a quick test that everything is going well. It should be tested on its own in the tests that's all.", "diff_hunk": "@@ -312,6 +315,55 @@ def training_check():\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n \n+def sync_test():", "from_author": false }, { "body": "Better variable names would help understand what this is testing. Also, those should be equal at the proper `iteration`.", "diff_hunk": "@@ -0,0 +1,63 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+from copy import deepcopy\n+\n+import torch\n+import torch.nn.functional as F\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator\n+from accelerate.test_utils import RegressionDataset, RegressionModel, require_cpu\n+from accelerate.utils import set_seed\n+\n+\n+def step_model(model, input, target, accelerator):\n+ model.train()\n+ output = model(input)\n+ loss = F.mse_loss(output, target.to(output.device))\n+ accelerator.backward(loss)\n+\n+\n+@require_cpu\n+class SyncTest(unittest.TestCase):\n+ def test_noop_wrapper(self):\n+ accelerator = Accelerator()\n+ device = accelerator.device\n+ set_seed(42)\n+ model = RegressionModel()\n+ dset = RegressionDataset()\n+ dl = DataLoader(dset, batch_size=16)\n+ ddp_model, dl = accelerator.prepare(deepcopy(model), dl)\n+ model.to(device)\n+ ddp_input, ddp_target = next(iter(dl)).values()\n+\n+ for iteration in range(2):\n+ input, target = accelerator.gather((ddp_input, ddp_target))\n+ input = input.to(accelerator.device)\n+ target = target.to(accelerator.device)\n+ step_model(model, input, target, accelerator)\n+ if iteration % 2 == 0:\n+ # Accumulate grads locally\n+ with accelerator.no_sync(ddp_model):\n+ step_model(ddp_model, ddp_input, ddp_target, accelerator)\n+ else:\n+ # Sync grads\n+ step_model(ddp_model, ddp_input, ddp_target, accelerator)\n+\n+ for i, j in zip(model.parameters(), ddp_model.parameters()):\n+ if not i.requires_grad:\n+ continue\n+ assert torch.allclose(i.grad, j.grad), f\"{i.grad} != {j.grad}\"", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@sgugger good for a rereview, slow test logs can be seen here passing: https://github.com/huggingface/accelerate/runs/6794400970?check_suite_focus=true", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/428", "pr_id": 960920764 }, { "diff": "diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nindex dbe64b5c7..9ece7b0f7 100644\n--- a/tests/deepspeed/test_deepspeed.py\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -158,7 +158,6 @@ def test_deepspeed_plugin(self, stage):\n self.assertFalse(deepspeed_plugin.zero3_init_flag)\n elif stage == ZERO3:\n self.assertTrue(deepspeed_plugin.zero3_init_flag)\n- deepspeed_plugin.deepspeed_config = None\n \n # Test `gradient_accumulation_steps` is set to 1 if unavailable in config file\n with tempfile.TemporaryDirectory() as dirpath:\n@@ -232,6 +231,10 @@ def test_deepspeed_plugin(self, stage):\n \n @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)\n def test_accelerate_state_deepspeed(self, dtype):\n+ state = AcceleratorState(_from_accelerator=True)\n+ if state.initialized:\n+ state.initialized = False\n+\n deepspeed_plugin = DeepSpeedPlugin(\n gradient_accumulation_steps=1,\n gradient_clipping=1.0,\n", 
"code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/427", "pr_id": 959938462 }, { "diff": "diff --git a/.github/workflows/check_dependencies.yml b/.github/workflows/check_dependencies.yml\nindex 65afdd6c9..c4818ba85 100644\n--- a/.github/workflows/check_dependencies.yml\n+++ b/.github/workflows/check_dependencies.yml\n@@ -36,4 +36,5 @@ jobs:\n build-docker-containers:\n needs: check-for-setup\n if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')\n- uses: ./.github/workflows/build-docker-images.yml\n\\ No newline at end of file\n+ uses: ./.github/workflows/build-docker-images.yml\n+ secrets: inherit\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@muellerzr Thank you pining for me. I read (part of) that page previously, but not in detail.\r\n\r\nSo it looks like to me that the called (reusable) workflow can't access `secrets`, and need to get it from the caller.\r\n(Only the workflows triggered by events like `push/pull_request/schedule`, etc. can access `secrets`)", "from_author": false }, { "body": "Yup exactly. And we can do so by using `secrets`, by specifying inherit it can have all of the secrets the caller has. Keeping it to be what's needed only isn't a bad idea either, rather than giving them everything. But for this one I decided all wouldn't be a terrible idea. ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/426", "pr_id": 959586682 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 45c4ae912..eb0f50ab8 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,7 @@\n \n from setuptools import setup\n from setuptools import find_packages\n-\n+# A comment\n extras = {}\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n extras[\"docs\"] = []\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/425", "pr_id": 959578856 }, { "diff": "diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml\nindex de8542359..c207f5515 100644\n--- a/.github/workflows/build-docker-images.yml\n+++ b/.github/workflows/build-docker-images.yml\n@@ -2,6 +2,7 @@ name: Build Docker images (scheduled)\n \n on:\n workflow_dispatch:\n+ workflow_call:\n schedule:\n - cron: \"0 1 * * *\"\n \ndiff --git a/.github/workflows/check_dependencies.yml b/.github/workflows/check_dependencies.yml\nnew file mode 100644\nindex 000000000..65afdd6c9\n--- /dev/null\n+++ b/.github/workflows/check_dependencies.yml\n@@ -0,0 +1,39 @@\n+name: Check for dependency modification\n+\n+on:\n+ push:\n+ branches:\n+ - main\n+ workflow_dispatch:\n+\n+env:\n+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n+\n+jobs:\n+ check-for-setup:\n+ runs-on: ubuntu-latest\n+ name: Check if setup was changed\n+ outputs:\n+ changed: ${{ steps.was_changed.outputs.changed }}\n+ steps:\n+ - uses: actions/checkout@v3\n+ with: \n+ fetch-depth: \"2\"\n+ \n+ - name: Get changed files\n+ id: changed-files\n+ uses: tj-actions/changed-files@v22.2\n+ \n+ - name: Was setup changed \n+ id: was_changed\n+ run: |\n+ for file in ${{ 
steps.changed-files.outputs.all_changed_files }}; do\n+ if [ `basename \"${file}\"` = \"setup.py\" ]; then\n+ echo ::set-output name=changed::\"1\"\n+ fi\n+ done\n+ \n+ build-docker-containers:\n+ needs: check-for-setup\n+ if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')\n+ uses: ./.github/workflows/build-docker-images.yml\n\\ No newline at end of file\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/on-merge.yml\nindex 4d603bf8e..f8b9ccddb 100644\n--- a/.github/workflows/on-merge.yml\n+++ b/.github/workflows/on-merge.yml\n@@ -1,10 +1,11 @@\n name: Self-hosted runner (push to \"main\")\n \n on:\n+ workflow_run:\n+ workflows: [\"Check for dependency modification\"]\n+ branches: [\"main\"]\n+ types: [completed]\n workflow_dispatch:\n- push:\n- branches:\n- - \"main\"\n \n env:\n TESTING_MOCKED_DATALOADERS: \"1\"\n", "code_comments": [ { "body": "This is not set anywhere, so I don't think this will work (also pashing the github token on a PR is not a trivial thing as you don't get secrets on pull requests. I'd just remove this whole writing comments thingy.", "diff_hunk": "@@ -0,0 +1,97 @@\n+# Copyright 2022 The HuggingFace Team, the AllenNLP library authors. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+Script to check if the setup.py was modified during a Pull Request,\n+and if so will write a comment stating that Docker Images will be \n+rebuilt upon a merge. \n+\n+Environment variables available:\n+- PR_NUMBER: The number of the currently open pull request\n+- REPO: The full name of the repository (such as 'huggingface/accelerate')\n+\"\"\"\n+\n+import os\n+import re\n+from github import Github, PullRequest\n+\n+def get_setup_diff(pull_request:PullRequest):\n+ \"\"\"Checks whether `setup.py` was changed during this pull request, and will \n+ return the diff if so\n+\n+ Args:\n+ pull_request (`PullRequest`):\n+ A pull request returned from `Github.repo.get_pull_request`\n+ \"\"\"\n+ files = pull_request.get_files()\n+ for f in files:\n+ if f.filename == \"setup.py\":\n+ return f'''```diff\\n{f.patch}\\n```'''\n+ return None\n+\n+def does_comment_exist(pull_request:PullRequest):\n+ \"\"\"Checks whether the bot has already commented on this pull request\n+\n+ Args:\n+ pull_request (`PullRequest`):\n+ A pull request returned from `Github.repo.get_pull_request`\n+ \"\"\"\n+ comments = pull_request.get_issue_comments()\n+ for c in comments:\n+ if c.user.login == \"github-actions[bot]\" and 'This PR modifies `setup.py`.' in c.body:\n+ return True\n+ return False\n+\n+def write_comment(pull_request:PullRequest, diff:str):\n+ \"\"\"Writes a comment stating that the pr modified setup.py, and that new Docker images will be built\n+\n+ Args:\n+ pull_request (`PullRequest`):\n+ A pull request returned from `Github.repo.get_pull_request`\n+ diff (`str`):\n+ The diff of the modified setup.py\n+ \"\"\"\n+ s = f'This PR modifies `setup.py`. 
New latest Docker images will be built and deployed before running tests once this has been merged:\\n\\n{diff}'\n+ pull_request.create_issue_comment(s)\n+\n+def update_diff(pull_request:PullRequest, diff:str):\n+ \"\"\"Updates the diff of the setup.py in the existing comment\n+\n+ Args:\n+ pull_request (`PullRequest`):\n+ A pull request returned from `Github.repo.get_pull_request`\n+ diff (`str`):\n+ The diff of the modified setup.py\n+ \"\"\"\n+ comments = pull_request.get_issue_comments()\n+ for c in comments:\n+ if c.user.login == \"github-actions[bot]\" and 'This PR modifies `setup.py`.' in c.body:\n+ comment = c\n+ break\n+ original_diff = re.search(r'```([^`]*)```', comment.body).group(0)\n+ new_body = comment.body.replace(original_diff, diff)\n+ comment.edit(new_body)\n+\n+def main():\n+ g = Github(os.environ[\"GITHUB_TOKEN\"])", "from_author": false }, { "body": "I'd remove this job personally.", "diff_hunk": "@@ -0,0 +1,68 @@\n+name: Check for dependency modification\n+\n+on:\n+ pull_request:\n+ types: [opened, synchronize, reopened]\n+ push:\n+ branches:\n+ - main\n+ workflow_dispatch:\n+\n+env:\n+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n+\n+jobs:\n+ check-for-setup:\n+ runs-on: ubuntu-latest\n+ name: Check if setup was changed\n+ outputs:\n+ changed: ${{ steps.was_changed.outputs.changed }}\n+ steps:\n+ - uses: actions/checkout@v3\n+ with: \n+ fetch-depth: \"2\"\n+ \n+ - name: Get changed files\n+ id: changed-files\n+ uses: tj-actions/changed-files@v22.2\n+ \n+ - name: Was setup changed \n+ id: was_changed\n+ run: |\n+ for file in ${{ steps.changed-files.outputs.all_changed_files }}; do\n+ if [ `basename \"${file}\"` = \"setup.py\" ]; then\n+ echo ::set-output name=changed::\"1\"\n+ fi\n+ done\n+\n+ write-message:", "from_author": false }, { "body": "Is there any reason that we should run this workflow `on pull_request`?", "diff_hunk": "@@ -0,0 +1,41 @@\n+name: Check for dependency modification\n+\n+on:\n+ pull_request:\n+ types: [opened, synchronize, reopened]", "from_author": false }, { "body": "Very nice - we should build the image only for the push event (to `main`).", "diff_hunk": "@@ -0,0 +1,41 @@\n+name: Check for dependency modification\n+\n+on:\n+ pull_request:\n+ types: [opened, synchronize, reopened]\n+ push:\n+ branches:\n+ - main\n+ workflow_dispatch:\n+\n+env:\n+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n+\n+jobs:\n+ check-for-setup:\n+ runs-on: ubuntu-latest\n+ name: Check if setup was changed\n+ outputs:\n+ changed: ${{ steps.was_changed.outputs.changed }}\n+ steps:\n+ - uses: actions/checkout@v3\n+ with: \n+ fetch-depth: \"2\"\n+ \n+ - name: Get changed files\n+ id: changed-files\n+ uses: tj-actions/changed-files@v22.2\n+ \n+ - name: Was setup changed \n+ id: was_changed\n+ run: |\n+ for file in ${{ steps.changed-files.outputs.all_changed_files }}; do\n+ if [ `basename \"${file}\"` = \"setup.py\" ]; then\n+ echo ::set-output name=changed::\"1\"\n+ fi\n+ done\n+ \n+ build-docker-containers:\n+ needs: check-for-setup\n+ if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')", "from_author": false }, { "body": "I was thinking that `completed` means the CI will be run only if the workflow `Check for dependency modification` is successful.\r\n\r\nAfter reading this [doc](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-a-workflow-based-on-the-conclusion-of-another-workflow), it is not the case. 
And this is good, as we want to run the push CI anyway (even if something wrong in the setup check).\r\n\r\n", "diff_hunk": "@@ -1,10 +1,11 @@\n name: Self-hosted runner (push to \"main\")\n \n on:\n+ workflow_run:\n+ workflows: [\"Check for dependency modification\"]\n+ branches: [\"main\"]\n+ types: [completed]", "from_author": false }, { "body": "Yup exactly", "diff_hunk": "@@ -1,10 +1,11 @@\n name: Self-hosted runner (push to \"main\")\n \n on:\n+ workflow_run:\n+ workflows: [\"Check for dependency modification\"]\n+ branches: [\"main\"]\n+ types: [completed]", "from_author": true }, { "body": "This was due to the overengineering part. We definitely could just trigger it on a push to main. ", "diff_hunk": "@@ -0,0 +1,41 @@\n+name: Check for dependency modification\n+\n+on:\n+ pull_request:\n+ types: [opened, synchronize, reopened]", "from_author": true }, { "body": "Yeah, I meanly mean `push` in this remark, and you are doing it as what I would expect. (`to main` is to make it more clear only πŸ˜„ )", "diff_hunk": "@@ -0,0 +1,41 @@\n+name: Check for dependency modification\n+\n+on:\n+ pull_request:\n+ types: [opened, synchronize, reopened]\n+ push:\n+ branches:\n+ - main\n+ workflow_dispatch:\n+\n+env:\n+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n+\n+jobs:\n+ check-for-setup:\n+ runs-on: ubuntu-latest\n+ name: Check if setup was changed\n+ outputs:\n+ changed: ${{ steps.was_changed.outputs.changed }}\n+ steps:\n+ - uses: actions/checkout@v3\n+ with: \n+ fetch-depth: \"2\"\n+ \n+ - name: Get changed files\n+ id: changed-files\n+ uses: tj-actions/changed-files@v22.2\n+ \n+ - name: Was setup changed \n+ id: was_changed\n+ run: |\n+ for file in ${{ steps.changed-files.outputs.all_changed_files }}; do\n+ if [ `basename \"${file}\"` = \"setup.py\" ]; then\n+ echo ::set-output name=changed::\"1\"\n+ fi\n+ done\n+ \n+ build-docker-containers:\n+ needs: check-for-setup\n+ if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1')", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@ydshieh you can see my almost 250 debug logs here: https://github.com/muellerzr/accelerate/actions\r\n\r\n**Very** *very* comfortable with this working", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/424", "pr_id": 957763227 }, { "diff": "diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nindex 1764e2381..d52cf5e1e 100644\n--- a/.github/workflows/nightly.yml\n+++ b/.github/workflows/nightly.yml\n@@ -5,12 +5,14 @@ on:\n schedule:\n - cron: \"0 2 * * *\"\n \n+env:\n+ RUN_SLOW: \"yes\"\n+\n jobs:\n run_all_tests_single_gpu:\n runs-on: [self-hosted, docker-gpu, multi-gpu]\n env:\n CUDA_VISIBLE_DEVICES: \"0\"\n- ALL_SLOW: \"yes\"\n container:\n image: huggingface/accelerate-gpu:latest\n options: --gpus all --shm-size \"16gb\"\n@@ -38,7 +40,6 @@ jobs:\n run_all_tests_multi_gpu:\n runs-on: [self-hosted, docker-gpu, multi-gpu]\n env:\n- ALL_SLOW: \"yes\"\n CUDA_VISIBLE_DEVICES: \"0,1\"\n container:\n image: huggingface/accelerate-gpu:latest\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/421", "pr_id": 954903613 }, { "diff": "diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nindex d50aab329..1764e2381 100644\n--- 
a/.github/workflows/nightly.yml\n+++ b/.github/workflows/nightly.yml\n@@ -23,12 +23,9 @@ jobs:\n run: |\n source activate accelerate\n git config --global --add safe.directory '*'\n- git fetch && git checkout ${{ github.sha }}\n- pip install tensorflow -U \n+ git fetch && git checkout ${{ github.sha }} \n pip install -e . --no-deps\n \n-# Note: tensorflow upgrade is needed until dropped py 3.6 support\n-\n - name: Run test on GPUs\n run: |\n source activate accelerate\n@@ -56,11 +53,8 @@ jobs:\n source activate accelerate\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }}\n- pip install tensorflow -U\n pip install -e . --no-deps\n \n- # Note: tensorflow upgrade is needed until dropped py 3.6 support\n-\n - name: Run test on GPUs\n run: |\n source activate accelerate\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/on-merge.yml\nindex 8df455563..4d603bf8e 100644\n--- a/.github/workflows/on-merge.yml\n+++ b/.github/workflows/on-merge.yml\n@@ -6,6 +6,9 @@ on:\n branches:\n - \"main\"\n \n+env:\n+ TESTING_MOCKED_DATALOADERS: \"1\"\n+\n jobs:\n run_all_tests_single_gpu:\n runs-on: [self-hosted, docker-gpu, multi-gpu]\n@@ -24,10 +27,7 @@ jobs:\n source activate accelerate\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }}\n- pip install tensorflow -U \n- pip install -e . -U\n-\n-# Note: tensorflow upgrade is needed until dropped py 3.6 support\n+ pip install -e .[test,test_trackers]\n \n - name: Run test on GPUs\n run: |\n@@ -53,10 +53,7 @@ jobs:\n source activate accelerate\n git config --global --add safe.directory '*'\n git fetch && git checkout ${{ github.sha }}\n- pip install tensorflow -U\n- pip install -e . -U\n-\n- # Note: tensorflow upgrade is needed until dropped py 3.6 support\n+ pip install -e .[test,test_trackers]\n \n - name: Run test on GPUs\n run: |\ndiff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml\nindex 9b54a269f..5d4707b4e 100644\n--- a/.github/workflows/quality.yml\n+++ b/.github/workflows/quality.yml\n@@ -7,10 +7,10 @@ jobs:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v2\n- - name: Set up Python 3.6\n- uses: actions/setup-python@v2\n+ - name: Set up Python 3.7\n+ uses: actions/setup-python@v3\n with:\n- python-version: 3.6\n+ python-version: 3.7\n - name: Install Python dependencies\n run: pip install -e .[quality]\n - name: Run Quality check\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 2bd407d85..d2de25a79 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -4,6 +4,7 @@ on: [pull_request]\n \n env:\n HF_HOME: ~/hf_cache\n+ TESTING_MOCKED_DATALOADERS: \"1\"\n \n jobs:\n run-tests:\n@@ -35,7 +36,7 @@ jobs:\n - name: Install the library\n run: |\n pip install --upgrade pip\n- pip install -e .[test,test_trackers,tensorboard]\n+ pip install -e .[test,test_trackers]\n \n - name: Run Tests\n run: |\ndiff --git a/docker/accelerate-cpu/Dockerfile b/docker/accelerate-cpu/Dockerfile\nindex 1d99c4035..5d1286772 100644\n--- a/docker/accelerate-cpu/Dockerfile\n+++ b/docker/accelerate-cpu/Dockerfile\n@@ -1,7 +1,7 @@\n # Builds CPU-only Docker image of PyTorch\n # Uses multi-staged approach to reduce size\n # Stage 1\n-FROM python:3.6-slim as compile-image\n+FROM python:3.7-slim as compile-image\n \n ARG DEBIAN_FRONTEND=noninteractive\n \n@@ -25,7 +25,7 @@ RUN python3 -m pip install --no-cache-dir \\\n --extra-index-url https://download.pytorch.org/whl/cpu\n \n # Stage 2\n-FROM 
python:3.6-slim AS build-image\n+FROM python:3.7-slim AS build-image\n COPY --from=compile-image /opt/venv /opt/venv\n RUN useradd -ms /bin/bash user\n USER user\ndiff --git a/docker/accelerate-gpu/Dockerfile b/docker/accelerate-gpu/Dockerfile\nindex f495d9063..746e65350 100644\n--- a/docker/accelerate-gpu/Dockerfile\n+++ b/docker/accelerate-gpu/Dockerfile\n@@ -4,7 +4,7 @@\n # Use base conda image to reduce time\n FROM continuumio/miniconda3:latest AS compile-image\n # Specify py version\n-ENV PYTHON_VERSION=3.6\n+ENV PYTHON_VERSION=3.7.3\n # Install apt libs\n RUN apt-get update && \\\n apt-get install -y curl git wget && \\\ndiff --git a/setup.py b/setup.py\nindex 51a6c1a01..a28b9254c 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -29,8 +29,8 @@\n \"scipy\",\n \"sklearn\"\n ]\n-extras[\"tensorboard\"] = [\"protobuf<=3.20.1\", \"tensorflow>=2.6.2\", \"tensorboard\"]\n-extras[\"test_trackers\"] = extras[\"tensorboard\"] + [\"wandb\", \"comet-ml\"]\n+\n+extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\n extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n \n extras[\"sagemaker\"] = [\n@@ -57,7 +57,7 @@\n \"accelerate-launch=accelerate.commands.launch:main\",\n ]\n },\n- python_requires=\">=3.6.0\",\n+ python_requires=\">=3.7.0\",\n install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n extras_require=extras,\n classifiers=[\n@@ -68,7 +68,6 @@\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 013dfad44..91dc0f7ad 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -26,7 +26,7 @@\n import torch\n \n from ..state import AcceleratorState\n-from ..utils import gather, is_comet_ml_available, is_tensorflow_available, is_tpu_available, is_wandb_available\n+from ..utils import gather, is_comet_ml_available, is_tensorboard_available, is_tpu_available, is_wandb_available\n \n \n def parse_flag_from_env(key, default=False):\n@@ -85,12 +85,12 @@ def require_multi_gpu(test_case):\n return unittest.skipUnless(torch.cuda.device_count() > 1, \"test requires multiple GPUs\")(test_case)\n \n \n-def require_tensorflow(test_case):\n+def require_tensorboard(test_case):\n \"\"\"\n- Decorator marking a test that requires TensorFlow installed. These tests are skipped when TensorFlow isn't\n+ Decorator marking a test that requires tensorboard installed. 
These tests are skipped when tensorboard isn't\n installed\n \"\"\"\n- return unittest.skipUnless(is_tensorflow_available(), \"test requires TensorFlow\")(test_case)\n+ return unittest.skipUnless(is_tensorboard_available(), \"test requires Tensorboard\")(test_case)\n \n \n def require_wandb(test_case):\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 186f27bb7..a39368dd3 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -26,7 +26,6 @@\n is_deepspeed_available,\n is_sagemaker_available,\n is_tensorboard_available,\n- is_tensorflow_available,\n is_tpu_available,\n is_wandb_available,\n )\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex b492f2f9c..947508726 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -63,10 +63,6 @@ def is_deepspeed_available():\n return False\n \n \n-def is_tensorflow_available():\n- return importlib.util.find_spec(\"tensorflow\") is not None\n-\n-\n def is_tensorboard_available():\n return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\n \ndiff --git a/tests/test_big_modeling.py b/tests/test_big_modeling.py\nindex be2238794..852b10c3b 100644\n--- a/tests/test_big_modeling.py\n+++ b/tests/test_big_modeling.py\n@@ -94,14 +94,18 @@ def test_cpu_offload(self):\n \n cpu_offload(model, execution_device=device)\n output = model(x)\n- self.assertTrue(torch.allclose(expected, output.cpu()))\n+ self.assertTrue(\n+ torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n+ )\n \n # Clean up for next test.\n remove_hook_from_submodules(model)\n \n cpu_offload(model, execution_device=device, offload_buffers=True)\n output = model(x)\n- self.assertTrue(torch.allclose(expected, output.cpu()))\n+ self.assertTrue(\n+ torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n+ )\n \n @slow\n @require_cuda\n@@ -127,7 +131,9 @@ def test_disk_offload(self):\n with TemporaryDirectory() as tmp_dir:\n disk_offload(model, tmp_dir, execution_device=device)\n output = model(x)\n- self.assertTrue(torch.allclose(expected, output.cpu()))\n+ self.assertTrue(\n+ torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n+ )\n \n # Clean up for next test.\n remove_hook_from_submodules(model)\n@@ -135,7 +141,9 @@ def test_disk_offload(self):\n with TemporaryDirectory() as tmp_dir:\n disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True)\n output = model(x)\n- self.assertTrue(torch.allclose(expected, output.cpu()))\n+ self.assertTrue(\n+ torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n+ )\n \n @slow\n @require_cuda\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex f02db8d21..a37ae7128 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -30,31 +30,22 @@\n MockingTestCase,\n TempDirTestCase,\n require_comet_ml,\n- require_tensorflow,\n+ require_tensorboard,\n require_wandb,\n )\n from accelerate.tracking import CometMLTracker, GeneralTracker\n-from accelerate.utils import is_comet_ml_available, is_tensorflow_available\n+from accelerate.utils import is_comet_ml_available\n \n \n if is_comet_ml_available():\n from comet_ml import OfflineExperiment\n \n-\n-if is_tensorflow_available():\n- import tensorflow as tf\n- from 
tensorboard.plugins.hparams import plugin_data_pb2\n- from tensorflow.core.util import event_pb2\n- from tensorflow.python.summary.summary_iterator import summary_iterator\n-\n-\n logger = logging.getLogger(__name__)\n \n \n+@require_tensorboard\n class TensorBoardTrackingTest(unittest.TestCase):\n- @require_tensorflow\n def test_init_trackers(self):\n- hps = None\n project_name = \"test_project_with_config\"\n with tempfile.TemporaryDirectory() as dirpath:\n accelerator = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n@@ -63,29 +54,9 @@ def test_init_trackers(self):\n accelerator.end_training()\n for child in Path(f\"{dirpath}/{project_name}\").glob(\"*/**\"):\n log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]\n- # The config log is stored one layer deeper in the logged directory\n- # And names are randomly generated each time\n- si = summary_iterator(str(log))\n- # Pull HPS through careful parsing\n- for event in si:\n- for value in event.summary.value:\n- proto_bytes = value.metadata.plugin_data.content\n- plugin_data = plugin_data_pb2.HParamsPluginData.FromString(proto_bytes)\n- if plugin_data.HasField(\"session_start_info\"):\n- hps = dict(plugin_data.session_start_info.hparams)\n-\n- self.assertTrue(isinstance(hps, dict))\n- keys = list(hps.keys())\n- keys.sort()\n- self.assertEqual(keys, [\"learning_rate\", \"num_iterations\", \"some_boolean\", \"some_string\"])\n- self.assertEqual(hps[\"num_iterations\"].number_value, 12)\n- self.assertEqual(hps[\"learning_rate\"].number_value, 0.01)\n- self.assertEqual(hps[\"some_boolean\"].bool_value, False)\n- self.assertEqual(hps[\"some_string\"].string_value, \"some_value\")\n+ self.assertNotEqual(str(log), \"\")\n \n- @require_tensorflow\n def test_log(self):\n- step = None\n project_name = \"test_project_with_log\"\n with tempfile.TemporaryDirectory() as dirpath:\n accelerator = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n@@ -96,21 +67,7 @@ def test_log(self):\n # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord\n # Names are randomly generated each time\n log = list(filter(lambda x: x.is_file(), Path(f\"{dirpath}/{project_name}\").iterdir()))[0]\n- serialized_examples = tf.data.TFRecordDataset(log)\n- for e in serialized_examples:\n- event = event_pb2.Event.FromString(e.numpy())\n- if step is None:\n- step = event.step\n- for value in event.summary.value:\n- if value.tag == \"total_loss\":\n- total_loss = value.simple_value\n- elif value.tag == \"iteration\":\n- iteration = value.simple_value\n- elif value.tag == \"my_text/text_summary\": # Append /text_summary to the key\n- my_text = value.tensor.string_val[0].decode()\n- self.assertAlmostEqual(total_loss, values[\"total_loss\"])\n- self.assertEqual(iteration, values[\"iteration\"])\n- self.assertEqual(my_text, values[\"my_text\"])\n+ self.assertNotEqual(str(log), \"\")\n \n def test_logging_dir(self):\n with self.assertRaisesRegex(ValueError, \"Logging with `tensorboard` requires a `logging_dir`\"):\n", "code_comments": [ { "body": "```suggestion\r\n return unittest.skipUnless(is_tensorboard_available(), \"test requires Tensorboard\")(test_case)\r\n```", "diff_hunk": "@@ -85,12 +85,11 @@ def require_multi_gpu(test_case):\n return unittest.skipUnless(torch.cuda.device_count() > 1, \"test requires multiple GPUs\")(test_case)\n \n \n-def require_tensorflow(test_case):\n+def require_tensorboard(test_case):\n \"\"\"\n- Decorator marking a test that requires TensorFlow installed. 
These tests are skipped when TensorFlow isn't\n- installed\n+ Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't installed\n \"\"\"\n- return unittest.skipUnless(is_tensorflow_available(), \"test requires TensorFlow\")(test_case)\n+ return unittest.skipUnless(is_tensorboard_available(), \"test requires wandb\")(test_case)", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/420", "pr_id": 954757453 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex d764d3927..2bd407d85 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -2,33 +2,41 @@ name: Run Tests\n \n on: [pull_request]\n \n+env:\n+ HF_HOME: ~/hf_cache\n+\n jobs:\n- test:\n+ run-tests:\n runs-on: ubuntu-latest\n+ strategy:\n+ matrix:\n+ test-kind: [\n+ test,\n+ test_example_differences,\n+ test_checkpoint_step,\n+ test_checkpoint_epoch,\n+ test_rest\n+ ]\n steps:\n- - uses: actions/checkout@v2\n- - name: Set up Python 3.7\n- uses: actions/setup-python@v2\n+ - uses: actions/checkout@v3\n+ - name: Set up python 3.7\n+ uses: actions/setup-python@v3\n with:\n python-version: 3.7\n- - name: Install Python dependencies\n- run: |\n- pip install --upgrade pip\n- pip install -e .[test,test_trackers]\n- - name: Run Tests\n- run: make test\n- \n- test_examples:\n- runs-on: ubuntu-latest\n- steps:\n- - uses: actions/checkout@v2\n- - name: Set up Python 3.7\n- uses: actions/setup-python@v2\n+ \n+ - name: Activate python cache\n+ uses: actions/cache@v3\n with:\n- python-version: 3.7\n- - name: Install Python dependencies\n+ path: |\n+ ${{ env.pythonLocation }}\n+ ${{ env.HF_HOME }}\n+ key: ${{ env.pythonLocation }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}\n+ \n+ - name: Install the library\n run: |\n pip install --upgrade pip\n- pip install -e .[test,tensorboard]\n+ pip install -e .[test,test_trackers,tensorboard]\n+ \n - name: Run Tests\n- run: make test_examples\n+ run: |\n+ make ${{ matrix.test-kind }}\n\\ No newline at end of file\ndiff --git a/Makefile b/Makefile\nindex f4534e12c..38cb16cfc 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -29,3 +29,16 @@ test:\n \n test_examples:\n \tpython -m pytest -s -v ./tests/test_examples.py\n+\n+# Broken down example tests for the CI runners\n+test_example_differences:\n+\tpython -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests\n+\n+test_checkpoint_epoch:\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_epoch\"\n+\n+test_checkpoint_step:\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"by_step\"\n+\n+test_rest:\n+\tpython -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k \"not by_step and not by_epoch\"\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/419", "pr_id": 954039784 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex e9d797083..d764d3927 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -7,12 +7,14 @@ jobs:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v2\n- - name: Set up Python 3.6\n+ - name: Set up Python 3.7\n uses: 
actions/setup-python@v2\n with:\n- python-version: 3.6\n+ python-version: 3.7\n - name: Install Python dependencies\n- run: pip install setuptools==59.5.0; pip install -e .[test,test_trackers]\n+ run: |\n+ pip install --upgrade pip\n+ pip install -e .[test,test_trackers]\n - name: Run Tests\n run: make test\n \n@@ -20,11 +22,13 @@ jobs:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v2\n- - name: Set up Python 3.6\n+ - name: Set up Python 3.7\n uses: actions/setup-python@v2\n with:\n- python-version: 3.6\n+ python-version: 3.7\n - name: Install Python dependencies\n- run: pip install setuptools==59.5.0; pip install -e .[test] tensorboard\n+ run: |\n+ pip install --upgrade pip\n+ pip install -e .[test,tensorboard]\n - name: Run Tests\n run: make test_examples\ndiff --git a/examples/README.md b/examples/README.md\nindex bb248e9d1..271ffd3c5 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -23,7 +23,7 @@ The [nlp_example.py](./nlp_example.py) script is a simple example to train a Ber\n Prior to running it you should install πŸ€— Dataset and πŸ€— Transformers:\n \n ```bash\n-pip install datasets transformers\n+pip install datasets evaluate transformers\n ```\n \n The same script can be run in any of the following configurations:\ndiff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 06edd8540..c06d06ee9 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -18,8 +18,9 @@\n import torch\n from torch.utils.data import DataLoader\n \n+import evaluate\n from accelerate import Accelerator, DistributedType\n-from datasets import load_dataset, load_metric\n+from datasets import load_dataset\n from transformers import (\n AdamW,\n AutoModelForSequenceClassification,\n@@ -137,7 +138,7 @@ def training_function(config, args):\n set_seed(seed)\n \n train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n- metric = load_metric(\"glue\", \"mrpc\")\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nindex 9fdc3fe72..53d582c67 100644\n--- a/examples/by_feature/cross_validation.py\n+++ b/examples/by_feature/cross_validation.py\n@@ -19,8 +19,9 @@\n import torch\n from torch.utils.data import DataLoader\n \n+import evaluate\n from accelerate import Accelerator, DistributedType\n-from datasets import DatasetDict, load_dataset, load_metric\n+from datasets import DatasetDict, load_dataset\n \n # New Code #\n # We'll be using StratifiedKFold for this example\n@@ -144,7 +145,7 @@ def training_function(config, args):\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n- metric = load_metric(\"glue\", \"mrpc\")\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 8304bcefe..4f0311785 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -19,8 +19,9 @@\n import torch\n from torch.utils.data import DataLoader\n \n+import evaluate\n from accelerate import Accelerator, DistributedType\n-from datasets import load_dataset, load_metric\n+from datasets import load_dataset\n from transformers 
import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n \n \n@@ -119,7 +120,7 @@ def training_function(config, args):\n \n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n datasets = load_dataset(\"glue\", \"mrpc\")\n- metric = load_metric(\"glue\", \"mrpc\")\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n \n def tokenize_function(examples):\n # max_length=None => use the model max length (it's actually the default)\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 140f14d75..ac4b45866 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -17,11 +17,11 @@\n import torch\n from torch.utils.data import DataLoader\n \n-from accelerate import Accelerator, DistributedType\n-\n # New Code #\n+import evaluate\n+from accelerate import Accelerator, DistributedType\n from accelerate.utils import find_executable_batch_size\n-from datasets import load_dataset, load_metric\n+from datasets import load_dataset\n from transformers import (\n AdamW,\n AutoModelForSequenceClassification,\n@@ -121,7 +121,7 @@ def training_function(config, args):\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n- metric = load_metric(\"glue\", \"mrpc\")\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex 6babf879a..0307d251f 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -18,8 +18,9 @@\n import torch\n from torch.utils.data import DataLoader\n \n+import evaluate\n from accelerate import Accelerator, DistributedType\n-from datasets import load_dataset, load_metric\n+from datasets import load_dataset\n from transformers import (\n AdamW,\n AutoModelForSequenceClassification,\n@@ -122,7 +123,7 @@ def training_function(config, args):\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n- metric = load_metric(\"glue\", \"mrpc\")\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 9538b21b1..4020b3382 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -18,8 +18,9 @@\n import torch\n from torch.utils.data import DataLoader\n \n+import evaluate\n from accelerate import Accelerator, DistributedType\n-from datasets import load_dataset, load_metric\n+from datasets import load_dataset\n from transformers import (\n AdamW,\n AutoModelForSequenceClassification,\n@@ -132,7 +133,7 @@ def training_function(config, args):\n set_seed(seed)\n \n train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n- metric = load_metric(\"glue\", \"mrpc\")\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 9c8b89706..8544d1279 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -18,8 +18,9 @@\n import torch\n from torch.utils.data import DataLoader\n \n+import evaluate\n from accelerate import Accelerator, DistributedType\n-from datasets import 
load_dataset, load_metric\n+from datasets import load_dataset\n from transformers import (\n AdamW,\n AutoModelForSequenceClassification,\n@@ -89,7 +90,7 @@ def training_function(config, args):\n \n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n datasets = load_dataset(\"glue\", \"mrpc\")\n- metric = load_metric(\"glue\", \"mrpc\")\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n \n def tokenize_function(examples):\n # max_length=None => use the model max length (it's actually the default)\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 87cfd7698..84c24fd11 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -17,8 +17,9 @@\n import torch\n from torch.utils.data import DataLoader\n \n+import evaluate\n from accelerate import Accelerator, DistributedType\n-from datasets import load_dataset, load_metric\n+from datasets import load_dataset\n from transformers import (\n AdamW,\n AutoModelForSequenceClassification,\n@@ -106,7 +107,7 @@ def training_function(config, args):\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n- metric = load_metric(\"glue\", \"mrpc\")\n+ metric = evaluate.load(\"glue\", \"mrpc\")\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\ndiff --git a/setup.py b/setup.py\nindex 5f6e128c1..51a6c1a01 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -24,11 +24,13 @@\n \"pytest-xdist\",\n \"pytest-subtests\",\n \"datasets\",\n+ \"evaluate\",\n \"transformers\",\n \"scipy\",\n \"sklearn\"\n ]\n-extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorflow>=2.6.2\", \"tensorboard\"]\n+extras[\"tensorboard\"] = [\"protobuf<=3.20.1\", \"tensorflow>=2.6.2\", \"tensorboard\"]\n+extras[\"test_trackers\"] = extras[\"tensorboard\"] + [\"wandb\", \"comet-ml\"]\n extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n \n extras[\"sagemaker\"] = [\n", "code_comments": [ { "body": "There's some micro optimization that can be done I found that can save us a number of minutes on each CI, and will actually let this no longer need to be a thing. 
I'll make a PR after this with that, it'll simplify some things quite a bit :) ", "diff_hunk": "@@ -7,24 +7,28 @@ jobs:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v2\n- - name: Set up Python 3.6\n+ - name: Set up Python 3.7\n uses: actions/setup-python@v2\n with:\n- python-version: 3.6\n+ python-version: 3.7\n - name: Install Python dependencies\n- run: pip install setuptools==59.5.0; pip install -e .[test,test_trackers]\n+ run: |\n+ pip install --upgrade pip\n+ pip install -e .[test,test_trackers]", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/417", "pr_id": 952408105 }, { "diff": "diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nindex 11946b544..d50aab329 100644\n--- a/.github/workflows/nightly.yml\n+++ b/.github/workflows/nightly.yml\n@@ -32,7 +32,7 @@ jobs:\n - name: Run test on GPUs\n run: |\n source activate accelerate\n- make test_cuda\n+ make test\n - name: Run examples on GPUs\n run: |\n source activate accelerate\n@@ -64,7 +64,7 @@ jobs:\n - name: Run test on GPUs\n run: |\n source activate accelerate\n- make test_cuda\n+ make test\n \n - name: Run examples on GPUs\n run: |\ndiff --git a/.github/workflows/on-merge.yml b/.github/workflows/on-merge.yml\nnew file mode 100644\nindex 000000000..8df455563\n--- /dev/null\n+++ b/.github/workflows/on-merge.yml\n@@ -0,0 +1,69 @@\n+name: Self-hosted runner (push to \"main\")\n+\n+on:\n+ workflow_dispatch:\n+ push:\n+ branches:\n+ - \"main\"\n+\n+jobs:\n+ run_all_tests_single_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ CUDA_VISIBLE_DEVICES: \"0\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone & pip install\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install tensorflow -U \n+ pip install -e . -U\n+\n+# Note: tensorflow upgrade is needed until dropped py 3.6 support\n+\n+ - name: Run test on GPUs\n+ run: |\n+ source activate accelerate\n+ make test\n+ - name: Run examples on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_examples\n+\n+ run_all_tests_multi_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install tensorflow -U\n+ pip install -e . 
-U\n+\n+ # Note: tensorflow upgrade is needed until dropped py 3.6 support\n+\n+ - name: Run test on GPUs\n+ run: |\n+ source activate accelerate\n+ make test\n+\n+ - name: Run examples on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_examples\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "Thinking on one of your frustrations @sgugger, I decided to add a `pip install -U` instead of `--no-deps` so that if a dep was added on a push, we can test it (and not instead have all those failing tests on a push to main and freak out πŸ˜‰ )", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/416", "pr_id": 952337121 }, { "diff": "diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml\nnew file mode 100644\nindex 000000000..69a101307\n--- /dev/null\n+++ b/.github/ISSUE_TEMPLATE/bug-report.yml\n@@ -0,0 +1,59 @@\n+name: \"\\U0001F41B Bug Report\"\n+description: Submit a bug report to help us improve Accelerate\n+labels: [ \"bug\" ]\n+body:\n+ - type: textarea\n+ id: system-info\n+ attributes:\n+ label: System Info\n+ description: Please share your accelerate configuration with us. You can run the command `accelerate env` and copy-paste its outputs below\n+ render: Shell\n+ placeholder: accelerate version, OS, python version, numpy version, torch version, and accelerate's configuration\n+ validations:\n+ required: true\n+ \n+ - type: checkboxes\n+ id: information-scripts-examples\n+ attributes:\n+ label: Information\n+ description: 'The problem arises when using:'\n+ options:\n+ - label: \"The official example scripts\"\n+ - label: \"My own modified scripts\"\n+ \n+ - type: checkboxes\n+ id: information-tasks\n+ attributes:\n+ label: Tasks\n+ description: \"The tasks I am working on are:\"\n+ options:\n+ - label: \"One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\"\n+ - label: \"My own task or dataset (give details below)\"\n+ \n+ - type: textarea\n+ id: reproduction\n+ validations:\n+ required: true\n+ attributes:\n+ label: Reproduction\n+ description: |\n+ Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.\n+ If you have code snippets, error messages, stack traces please provide them here as well.\n+ Important! Use code tags to correctly format your code. 
See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting\n+ Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.\n+\n+ placeholder: |\n+ Steps to reproduce the behavior:\n+ \n+ 1.\n+ 2.\n+ 3.\n+\n+ - type: textarea\n+ id: expected-behavior\n+ validations:\n+ required: true\n+ attributes:\n+ label: Expected behavior\n+ description: \"A clear and concise description of what you would expect to happen.\"\n+ render: Shell\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/415", "pr_id": 952306197 }, { "diff": "diff --git a/docker/accelerate-gpu/Dockerfile b/docker/accelerate-gpu/Dockerfile\nindex 6195ae2ae..f495d9063 100644\n--- a/docker/accelerate-gpu/Dockerfile\n+++ b/docker/accelerate-gpu/Dockerfile\n@@ -22,7 +22,7 @@ SHELL [\"/bin/bash\", \"-c\"]\n # Activate the conda env and install torch + accelerate\n RUN source activate accelerate && \\\n python3 -m pip install --no-cache-dir \\\n- git+https://github.com/huggingface/accelerate@nightly-runner#egg=accelerate[test,test_trackers] \\\n+ git+https://github.com/huggingface/accelerate#egg=accelerate[test,test_trackers] \\\n --extra-index-url https://download.pytorch.org/whl/cu113\n \n # Stage 2\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/414", "pr_id": 952293471 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 4613e1e8d..e9d797083 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -14,7 +14,7 @@ jobs:\n - name: Install Python dependencies\n run: pip install setuptools==59.5.0; pip install -e .[test,test_trackers]\n - name: Run Tests\n- run: make test_cpu\n+ run: make test\n \n test_examples:\n runs-on: ubuntu-latest\ndiff --git a/Makefile b/Makefile\nindex d2822153b..f4534e12c 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -24,12 +24,8 @@ style:\n \tpython utils/style_doc.py src/accelerate docs/source --max_len 119\n \t\n # Run tests for the library\n-test_cpu:\n+test:\n \tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py\n \n-test_cuda:\n-\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/test_scheduler.py --ignore=./tests/test_cpu.py\n-\tpython -m pytest -s -v ./tests/test_cpu.py ./tests/test_scheduler.py\n-\n test_examples:\n \tpython -m pytest -s -v ./tests/test_examples.py\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex 46723ebfa..e8dc99c8c 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -2,5 +2,13 @@\n # There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n # module, but to preserve other warnings. 
So, don't check this module at all.\n \n-from .testing import are_the_same_tensors, execute_subprocess_async, require_cuda, require_multi_gpu, require_tpu, slow\n+from .testing import (\n+ are_the_same_tensors,\n+ execute_subprocess_async,\n+ require_cpu,\n+ require_cuda,\n+ require_multi_gpu,\n+ require_tpu,\n+ slow,\n+)\n from .training import RegressionDataset, RegressionModel\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 9379eb0b9..013dfad44 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -56,6 +56,13 @@ def slow(test_case):\n return unittest.skipUnless(_run_slow_tests, \"test is slow\")(test_case)\n \n \n+def require_cpu(test_case):\n+ \"\"\"\n+ Decorator marking a test that must be only ran on the CPU. These tests are skipped when a GPU is available.\n+ \"\"\"\n+ return unittest.skipUnless(not torch.cuda.is_available(), \"test requires only a CPU\")(test_case)\n+\n+\n def require_cuda(test_case):\n \"\"\"\n Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available.\ndiff --git a/tests/test_cpu.py b/tests/test_cpu.py\nindex 63ee69a97..ab73058c1 100644\n--- a/tests/test_cpu.py\n+++ b/tests/test_cpu.py\n@@ -15,9 +15,10 @@\n import unittest\n \n from accelerate import debug_launcher\n-from accelerate.test_utils import test_script\n+from accelerate.test_utils import require_cpu, test_script\n \n \n+@require_cpu\n class MultiCPUTester(unittest.TestCase):\n def test_cpu(self):\n debug_launcher(test_script.main)\ndiff --git a/tests/test_scheduler.py b/tests/test_scheduler.py\nindex 8ae9e56ae..be4f975fb 100644\n--- a/tests/test_scheduler.py\n+++ b/tests/test_scheduler.py\n@@ -18,6 +18,7 @@\n import torch\n \n from accelerate import Accelerator, debug_launcher\n+from accelerate.test_utils import require_cpu\n \n \n def scheduler_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\n@@ -46,6 +47,7 @@ def scheduler_test(num_processes=2, step_scheduler_with_optimizer=True, split_ba\n ), f\"Wrong lr found at second step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}\"\n \n \n+@require_cpu\n class SchedulerTester(unittest.TestCase):\n def test_scheduler_steps_with_optimizer_single_process(self):\n debug_launcher(partial(scheduler_test, num_processes=1), num_processes=1)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/413", "pr_id": 952279140 }, { "diff": "diff --git a/src/accelerate/test_utils/training.py b/src/accelerate/test_utils/training.py\nindex c26587abe..7345b93cb 100644\n--- a/src/accelerate/test_utils/training.py\n+++ b/src/accelerate/test_utils/training.py\n@@ -17,8 +17,6 @@\n from torch.utils.data import DataLoader\n \n from accelerate.utils.dataclasses import DistributedType\n-from datasets import load_dataset\n-from transformers import AutoTokenizer\n \n \n class RegressionDataset:\n@@ -51,6 +49,9 @@ def forward(self, x=None):\n \n \n def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ from datasets import load_dataset\n+ from transformers import AutoTokenizer\n+\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n datasets = load_dataset(\"csv\", data_files=data_files)\n", "code_comments": [], 
"context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks folks, for the library and the fix!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/411", "pr_id": 949605127 }, { "diff": "diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml\nindex 6b7987e51..de8542359 100644\n--- a/.github/workflows/build-docker-images.yml\n+++ b/.github/workflows/build-docker-images.yml\n@@ -1,7 +1,7 @@\n name: Build Docker images (scheduled)\n \n on:\n- repository_dispatch:\n+ workflow_dispatch:\n schedule:\n - cron: \"0 1 * * *\"\n \ndiff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml\nnew file mode 100644\nindex 000000000..11946b544\n--- /dev/null\n+++ b/.github/workflows/nightly.yml\n@@ -0,0 +1,72 @@\n+name: Self-hosted runner (scheduled)\n+\n+on:\n+ workflow_dispatch:\n+ schedule:\n+ - cron: \"0 2 * * *\"\n+\n+jobs:\n+ run_all_tests_single_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ CUDA_VISIBLE_DEVICES: \"0\"\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone & pip install\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install tensorflow -U \n+ pip install -e . --no-deps\n+\n+# Note: tensorflow upgrade is needed until dropped py 3.6 support\n+\n+ - name: Run test on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_cuda\n+ - name: Run examples on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_examples\n+\n+ run_all_tests_multi_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ ALL_SLOW: \"yes\"\n+ CUDA_VISIBLE_DEVICES: \"0,1\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install tensorflow -U\n+ pip install -e . 
--no-deps\n+\n+ # Note: tensorflow upgrade is needed until dropped py 3.6 support\n+\n+ - name: Run test on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_cuda\n+\n+ - name: Run examples on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_examples\n\\ No newline at end of file\ndiff --git a/docker/accelerate-cpu/Dockerfile b/docker/accelerate-cpu/Dockerfile\nindex c536fb70c..1d99c4035 100644\n--- a/docker/accelerate-cpu/Dockerfile\n+++ b/docker/accelerate-cpu/Dockerfile\n@@ -21,8 +21,8 @@ WORKDIR /workspace\n RUN python3 -m pip install --upgrade --no-cache-dir pip\n RUN python3 -m pip install --no-cache-dir \\\n jupyter \\\n- torch --extra-index-url https://download.pytorch.org/whl/cpu \\\n- git+https://github.com/huggingface/accelerate#egg=accelerate[dev]\n+ git+https://github.com/huggingface/accelerate#egg=accelerate[test,test_trackers] \\\n+ --extra-index-url https://download.pytorch.org/whl/cpu\n \n # Stage 2\n FROM python:3.6-slim AS build-image\ndiff --git a/docker/accelerate-gpu/Dockerfile b/docker/accelerate-gpu/Dockerfile\nindex 299f573d8..6195ae2ae 100644\n--- a/docker/accelerate-gpu/Dockerfile\n+++ b/docker/accelerate-gpu/Dockerfile\n@@ -4,7 +4,7 @@\n # Use base conda image to reduce time\n FROM continuumio/miniconda3:latest AS compile-image\n # Specify py version\n-ENV PYTHON_VERSION=3.6 \n+ENV PYTHON_VERSION=3.6\n # Install apt libs\n RUN apt-get update && \\\n apt-get install -y curl git wget && \\\n@@ -22,17 +22,21 @@ SHELL [\"/bin/bash\", \"-c\"]\n # Activate the conda env and install torch + accelerate\n RUN source activate accelerate && \\\n python3 -m pip install --no-cache-dir \\\n- torch --extra-index-url https://download.pytorch.org/whl/cu113 \\\n- git+https://github.com/huggingface/accelerate#egg=accelerate[dev]\n+ git+https://github.com/huggingface/accelerate@nightly-runner#egg=accelerate[test,test_trackers] \\\n+ --extra-index-url https://download.pytorch.org/whl/cu113\n \n # Stage 2\n FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image\n COPY --from=compile-image /opt/conda /opt/conda\n ENV PATH /opt/conda/bin:$PATH\n \n-RUN echo \"source activate accelerate\" >> /.bashrc\n+# Install apt libs\n+RUN apt-get update && \\\n+ apt-get install -y curl git wget && \\\n+ apt-get clean && \\\n+ rm -rf /var/lib/apt/lists*\n+\n+RUN echo \"source activate accelerate\" >> ~/.profile\n \n-RUN useradd -ms /bin/bash user\n-USER user\n # Activate the virtualenv\n CMD [\"/bin/bash\"]\n\\ No newline at end of file\ndiff --git a/setup.py b/setup.py\nindex dcfe3fd8e..5f6e128c1 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -19,6 +19,7 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n extras[\"docs\"] = []\n extras[\"test\"] = [\n+ \"psutil\",\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n@@ -27,7 +28,7 @@\n \"scipy\",\n \"sklearn\"\n ]\n-extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorflow\"]\n+extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorflow>=2.6.2\", \"tensorboard\"]\n extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n \n extras[\"sagemaker\"] = [\ndiff --git a/src/accelerate/test_utils/training.py b/src/accelerate/test_utils/training.py\nindex c26587abe..7345b93cb 100644\n--- a/src/accelerate/test_utils/training.py\n+++ b/src/accelerate/test_utils/training.py\n@@ -17,8 +17,6 @@\n from torch.utils.data import DataLoader\n \n from accelerate.utils.dataclasses import DistributedType\n-from datasets import load_dataset\n-from transformers import AutoTokenizer\n \n \n 
class RegressionDataset:\n@@ -51,6 +49,9 @@ def forward(self, x=None):\n \n \n def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ from datasets import load_dataset\n+ from transformers import AutoTokenizer\n+\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n datasets = load_dataset(\"csv\", data_files=data_files)\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 6ec2a143a..8f277b932 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -21,6 +21,8 @@\n import unittest\n from unittest import mock\n \n+import torch\n+\n from accelerate.test_utils.examples import compare_against_test\n from accelerate.test_utils.testing import TempDirTestCase, slow\n from accelerate.utils import write_basic_config\n@@ -142,11 +144,11 @@ def test_checkpointing_by_epoch(self):\n def test_checkpointing_by_steps(self):\n testargs = f\"\"\"\n examples/by_feature/checkpointing.py\n- --checkpointing_steps 2\n+ --checkpointing_steps 1\n --output_dir {self.tmpdir}\n \"\"\".split()\n _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE, env=os.environ)\n- self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_5\")))\n \n def test_load_states_by_epoch(self):\n testargs = f\"\"\"\n@@ -163,14 +165,23 @@ def test_load_states_by_epoch(self):\n def test_load_states_by_steps(self):\n testargs = f\"\"\"\n examples/by_feature/checkpointing.py\n- --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_4\")}\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_5\")}\n \"\"\".split()\n output = subprocess.run(\n self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ).stdout\n- self.assertNotIn(\"epoch 0:\", output)\n- self.assertIn(\"epoch 1:\", output)\n- self.assertIn(\"epoch 2:\", output)\n+ if torch.cuda.is_available():\n+ num_processes = torch.cuda.device_count()\n+ else:\n+ num_processes = 1\n+ if num_processes > 1:\n+ self.assertNotIn(\"epoch 0:\", output)\n+ self.assertNotIn(\"epoch 1:\", output)\n+ self.assertIn(\"epoch 2:\", output)\n+ else:\n+ self.assertNotIn(\"epoch 0:\", output)\n+ self.assertIn(\"epoch 1:\", output)\n+ self.assertIn(\"epoch 2:\", output)\n \n @slow\n def test_cross_validation(self):\n", "code_comments": [ { "body": "Why was this necessary?", "diff_hunk": "@@ -0,0 +1,65 @@\n+name: Self-hosted runner (scheduled)\n+\n+on:\n+ workflow_dispatch:\n+ schedule:\n+ - cron: \"0 2 * * *\"\n+\n+jobs:\n+ run_all_tests_single_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ CUDA_VISIBLE_DEVICES: \"0\"\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone & pip install\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install -e . 
--no-deps\n+ pip install tensorflow -U\n+ - name: Run test on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_cuda\n+ - name: Run examples on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_examples\n+\n+ run_all_tests_multi_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'", "from_author": false }, { "body": "Is `tensorflow` required?", "diff_hunk": "@@ -0,0 +1,65 @@\n+name: Self-hosted runner (scheduled)\n+\n+on:\n+ workflow_dispatch:\n+ schedule:\n+ - cron: \"0 2 * * *\"\n+\n+jobs:\n+ run_all_tests_single_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ CUDA_VISIBLE_DEVICES: \"0\"\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone & pip install\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install -e . --no-deps\n+ pip install tensorflow -U", "from_author": false }, { "body": "I was getting odd permission errors, and this was the documented solution I found. (Will post the link in the AM)", "diff_hunk": "@@ -0,0 +1,65 @@\n+name: Self-hosted runner (scheduled)\n+\n+on:\n+ workflow_dispatch:\n+ schedule:\n+ - cron: \"0 2 * * *\"\n+\n+jobs:\n+ run_all_tests_single_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ CUDA_VISIBLE_DEVICES: \"0\"\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone & pip install\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install -e . 
--no-deps\n+ pip install tensorflow -U\n+ - name: Run test on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_cuda\n+ - name: Run examples on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_examples\n+\n+ run_all_tests_multi_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'", "from_author": true }, { "body": "It won't be after the prior prs are merged, nice catch!", "diff_hunk": "@@ -0,0 +1,65 @@\n+name: Self-hosted runner (scheduled)\n+\n+on:\n+ workflow_dispatch:\n+ schedule:\n+ - cron: \"0 2 * * *\"\n+\n+jobs:\n+ run_all_tests_single_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ CUDA_VISIBLE_DEVICES: \"0\"\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone & pip install\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install -e . --no-deps\n+ pip install tensorflow -U", "from_author": true }, { "body": "@LysandreJik here's the specific thread: https://github.com/actions/checkout/issues/760#issuecomment-1097519290\r\n\r\nHow we got here is the runner has a clone of accelerate inside the working directory for the action, and that clone is then updated, checked out, etc. But we run into this permission denied unsafe repository from github, so github needs to be told we can safely adjust the repository. ", "diff_hunk": "@@ -0,0 +1,65 @@\n+name: Self-hosted runner (scheduled)\n+\n+on:\n+ workflow_dispatch:\n+ schedule:\n+ - cron: \"0 2 * * *\"\n+\n+jobs:\n+ run_all_tests_single_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ CUDA_VISIBLE_DEVICES: \"0\"\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone & pip install\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install -e . --no-deps\n+ pip install tensorflow -U\n+ - name: Run test on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_cuda\n+ - name: Run examples on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_examples\n+\n+ run_all_tests_multi_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'", "from_author": true }, { "body": "Okay, interesting. 
Thanks!", "diff_hunk": "@@ -0,0 +1,65 @@\n+name: Self-hosted runner (scheduled)\n+\n+on:\n+ workflow_dispatch:\n+ schedule:\n+ - cron: \"0 2 * * *\"\n+\n+jobs:\n+ run_all_tests_single_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ CUDA_VISIBLE_DEVICES: \"0\"\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone & pip install\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'\n+ git fetch && git checkout ${{ github.sha }}\n+ pip install -e . --no-deps\n+ pip install tensorflow -U\n+ - name: Run test on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_cuda\n+ - name: Run examples on GPUs\n+ run: |\n+ source activate accelerate\n+ make test_examples\n+\n+ run_all_tests_multi_gpu:\n+ runs-on: [self-hosted, docker-gpu, multi-gpu]\n+ env:\n+ ALL_SLOW: \"yes\"\n+ container:\n+ image: huggingface/accelerate-gpu:latest\n+ options: --gpus all --shm-size \"16gb\"\n+ defaults:\n+ run:\n+ working-directory: accelerate/\n+ shell: bash\n+ steps:\n+ - name: Update clone\n+ run: |\n+ source activate accelerate\n+ git config --global --add safe.directory '*'", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/410", "pr_id": 949600817 }, { "diff": "diff --git a/docker/accelerate-cpu/Dockerfile b/docker/accelerate-cpu/Dockerfile\nindex c536fb70c..0c7073f7c 100644\n--- a/docker/accelerate-cpu/Dockerfile\n+++ b/docker/accelerate-cpu/Dockerfile\n@@ -27,8 +27,11 @@ RUN python3 -m pip install --no-cache-dir \\\n # Stage 2\n FROM python:3.6-slim AS build-image\n COPY --from=compile-image /opt/venv /opt/venv\n-RUN useradd -ms /bin/bash user\n-USER user\n+# Install apt libs\n+RUN apt-get update && \\\n+ apt-get install -y curl git wget && \\\n+ apt-get clean && \\\n+ rm -rf /var/lib/apt/lists*\n \n # Make sure we use the virtualenv\n ENV PATH=\"/opt/venv/bin:$PATH\"\ndiff --git a/docker/accelerate-gpu/Dockerfile b/docker/accelerate-gpu/Dockerfile\nindex 299f573d8..5eabb00f7 100644\n--- a/docker/accelerate-gpu/Dockerfile\n+++ b/docker/accelerate-gpu/Dockerfile\n@@ -30,9 +30,13 @@ FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image\n COPY --from=compile-image /opt/conda /opt/conda\n ENV PATH /opt/conda/bin:$PATH\n \n-RUN echo \"source activate accelerate\" >> /.bashrc\n+# Install apt libs\n+RUN apt-get update && \\\n+ apt-get install -y curl git wget && \\\n+ apt-get clean && \\\n+ rm -rf /var/lib/apt/lists*\n+\n+RUN echo \"source activate accelerate\" >> ~/.profile\n \n-RUN useradd -ms /bin/bash user\n-USER user\n # Activate the virtualenv\n CMD [\"/bin/bash\"]\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/409", "pr_id": 949593560 }, { "diff": "diff --git a/setup.py b/setup.py\nindex dcfe3fd8e..b53519525 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -19,6 +19,7 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n extras[\"docs\"] = []\n extras[\"test\"] = [\n+ \"psutil\", \n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n@@ -27,7 +28,7 @@\n \"scipy\",\n 
\"sklearn\"\n ]\n-extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorflow\"]\n+extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorflow>=2.6.2\", \"tensorboard\"]\n extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n \n extras[\"sagemaker\"] = [\n", "code_comments": [ { "body": "No, `psutil` should be an optional dependency as it's only used for big model inference when `device_map=\"auto\"`.", "diff_hunk": "@@ -55,7 +55,7 @@\n ]\n },\n python_requires=\">=3.6.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],", "from_author": false }, { "body": "Makes sense, will move it over there. Thanks!", "diff_hunk": "@@ -55,7 +55,7 @@\n ]\n },\n python_requires=\">=3.6.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/408", "pr_id": 949591409 }, { "diff": "diff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 6ec2a143a..3c875e02d 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -21,6 +21,7 @@\n import unittest\n from unittest import mock\n \n+from accelerate import Accelerator\n from accelerate.test_utils.examples import compare_against_test\n from accelerate.test_utils.testing import TempDirTestCase, slow\n from accelerate.utils import write_basic_config\n@@ -142,11 +143,11 @@ def test_checkpointing_by_epoch(self):\n def test_checkpointing_by_steps(self):\n testargs = f\"\"\"\n examples/by_feature/checkpointing.py\n- --checkpointing_steps 2\n+ --checkpointing_steps 1\n --output_dir {self.tmpdir}\n \"\"\".split()\n _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE, env=os.environ)\n- self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_5\")))\n \n def test_load_states_by_epoch(self):\n testargs = f\"\"\"\n@@ -163,14 +164,20 @@ def test_load_states_by_epoch(self):\n def test_load_states_by_steps(self):\n testargs = f\"\"\"\n examples/by_feature/checkpointing.py\n- --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_4\")}\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_5\")}\n \"\"\".split()\n output = subprocess.run(\n self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ).stdout\n- self.assertNotIn(\"epoch 0:\", output)\n- self.assertIn(\"epoch 1:\", output)\n- self.assertIn(\"epoch 2:\", output)\n+ num_processes = Accelerator().num_processes\n+ if num_processes > 1:\n+ self.assertNotIn(\"epoch 0:\", output)\n+ self.assertNotIn(\"epoch 1:\", output)\n+ self.assertIn(\"epoch 2:\", output)\n+ else:\n+ self.assertNotIn(\"epoch 0:\", output)\n+ self.assertIn(\"epoch 1:\", output)\n+ self.assertIn(\"epoch 2:\", output)\n \n @slow\n def test_cross_validation(self):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/407", "pr_id": 949589769 }, { "diff": "diff --git a/.github/workflows/test.yml 
b/.github/workflows/test.yml\nindex d2de25a79..c585e742a 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -13,6 +13,7 @@ jobs:\n matrix:\n test-kind: [\n test,\n+ test_deepspeed,\n test_example_differences,\n test_checkpoint_step,\n test_checkpoint_epoch,\ndiff --git a/Makefile b/Makefile\nindex 38cb16cfc..58cb89ab9 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -27,6 +27,9 @@ style:\n test:\n \tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py\n \n+test_deepspeed:\n+\tpython -m pytest -s -v ./tests/deepspeed\n+\n test_examples:\n \tpython -m pytest -s -v ./tests/test_examples.py\n \ndiff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 130659196..4b2dfac18 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -29,4 +29,6 @@\n title: Fully Sharded Data Parallel\n - local: memory\n title: Memory Utilities\n+ - local: deepspeed\n+ title: DeepSpeed\n title: API Reference\ndiff --git a/docs/source/deepspeed.mdx b/docs/source/deepspeed.mdx\nnew file mode 100644\nindex 000000000..c0385c352\n--- /dev/null\n+++ b/docs/source/deepspeed.mdx\n@@ -0,0 +1,508 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Currently it provides full support for:\n+\n+1. Optimizer state partitioning (ZeRO stage 1)\n+2. Gradient partitioning (ZeRO stage 2)\n+3. Parameter partitioning (ZeRO stage 3)\n+4. Custom mixed precision training handling\n+5. A range of fast CUDA-extension-based optimizers\n+6. ZeRO-Offload to CPU and Disk/NVMe\n+\n+ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU\n+Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).\n+\n+DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.\n+\n+DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which\n+won't be possible on a single GPU.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. 
\n+ The user need not change any code, and this option is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 as well as CPU/Disk offload of optimizer states, gradients and parameters. \n+Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer, along with a diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)\n+![ZeRO Data Parallelism](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png)\n+\n+(Source: [link](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/))\n+\n+ a. **Stage 1**: Shards optimizer states across data parallel workers/GPUs\n+\n+ b. **Stage 2**: Shards optimizer states + gradients across data parallel workers/GPUs\n+\n+ c. **Stage 3**: Shards optimizer states + gradients + model parameters across data parallel workers/GPUs\n+\n+ d. **Optimizer Offload**: Offloads the gradients + optimizer states to CPU/Disk, building on top of ZeRO Stage 2\n+\n+ e. **Param Offload**: Offloads the model parameters to CPU/Disk, building on top of ZeRO Stage 3\n+\n+<u>Note</u>: With respect to Disk Offload, the disk should be an NVMe for decent speed, but it technically works on any disk\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer or an LR scheduler, and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at the easy-to-use integration via `accelerate config`, \n+followed by the more flexible and feature-rich `deepspeed config file` integration. \n+\n+### Accelerate DeepSpeed Plugin\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed, to which you should answer no. 
Then answer the following questions to generate a basic DeepSpeed config.\n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with DeepSpeed Plugin:\n+\n+**ZeRO Stage-2 DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: none\n+ offload_param_device: none\n+ zero3_init_flag: true\n+ zero_stage: 2\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/nlp_example.py --mixed_precision fp16\n+```\n+\n+**ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: cpu\n+ offload_param_device: cpu\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/nlp_example.py --mixed_precision fp16\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning\n+`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.\n+`gradient_clipping`: Enable gradient clipping with value.\n+`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.\n+`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.\n+`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.\n+`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.\n+`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training. \n+```\n+To be able to tweak more options, you will need to use a DeepSpeed config file.\n+\n+### DeepSpeed Config File\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer yes \n+and provide the path to the deepspeed config file. 
\n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Config File:\n+\n+**ZeRO Stage-2 DeepSpeed Config File Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+with the contents of `zero_stage2_config.json` being:\n+```json\n+{\n+ \"fp16\": {\n+ \"enabled\": true,\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\",\n+ \"torch_adam\": true,\n+ \"adam_w_mode\": true\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 2,\n+ \"allgather_partitions\": true,\n+ \"allgather_bucket_size\": 2e8,\n+ \"overlap_comm\": true,\n+ \"reduce_scatter\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"contiguous_gradients\": true\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage2_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 24 \\\n+--per_device_eval_batch_size 24 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**ZeRO Stage-3 with CPU offload DeepSpeed Config File Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+with the contents of `zero_stage3_offload_config.json` being:\n+```json\n+{\n+ \"fp16\": {\n+ \"enabled\": true,\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\"\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ 
\"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"offload_optimizer\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"offload_param\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"overlap_comm\": true,\n+ \"contiguous_gradients\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"stage3_prefetch_bucket_size\": \"auto\",\n+ \"stage3_param_persistence_threshold\": \"auto\",\n+ \"sub_group_size\": 1e9,\n+ \"stage3_max_live_parameters\": 1e9,\n+ \"stage3_max_reuse_distance\": 1e9,\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\"\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage3_offload_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 32 \\\n+--per_device_eval_batch_size 32 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**Important code changes when using DeepSpeed Config File**\n+\n+1. DeepSpeed Optimizers and Schedulers. For more information on these, \n+see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation.\n+We will look at the changes needed in the code when using these.\n+ \n+ a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys present in the DeepSpeed config file.\n+ In this situation, those will be used and user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code.\n+ Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:\n+ ```python\n+ # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer\n+ optimizer_cls = (\n+ torch.optim.AdamW\n+ if accelerator.state.deepspeed_plugin is None\n+ or \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ else DummyOptim\n+ )\n+ optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)\n+\n+ # Creates Dummy Scheduler if `scheduler` was spcified in the config file else creates `args.lr_scheduler_type` Scheduler\n+ if (\n+ accelerator.state.deepspeed_plugin is None\n+ or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ ):\n+ lr_scheduler = get_scheduler(\n+ name=args.lr_scheduler_type,\n+ optimizer=optimizer,\n+ num_warmup_steps=args.num_warmup_steps,\n+ num_training_steps=args.max_train_steps,\n+ )\n+ else:\n+ lr_scheduler = DummyScheduler(\n+ optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps\n+ )\n+ ```\n+ b. 
Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\n+ In this situation, no code changes are needed from the user, and this is the case when using integration via the DeepSpeed Plugin.\n+ In the above example, we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\n+\n+ c. Custom Optim + DS Scheduler: The case when only the `scheduler` key is present in the DeepSpeed config file. \n+ In this situation, the user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code. \n+\n+ d. DS Optim + Custom Scheduler: The case when only the `optimizer` key is present in the DeepSpeed config file. \n+ This will result in an error because one can only use the DS Scheduler when using the DS Optim.\n+\n+2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by the `prepare` method \n+based on the model, dataloaders, dummy optimizer and dummy schedulers provided to it. \n+Only the `auto` fields specified in the above examples are handled by the `prepare` method and the rest have to be explicitly specified by the user.\n+\n+## Saving and loading\n+\n+1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.\n+\n+2. Under ZeRO Stage-3, `state_dict` contains just the placeholders since the model weights are partitioned across multiple GPUs.\n+ZeRO Stage-3 has 2 options:\n+\n+ a. Saving the entire 16bit model weights to directly load later on using `model.load_state_dict(torch.load(pytorch_model.bin))`.\n+ For this, either set `zero_optimization.stage3_gather_16bit_weights_on_model_save` to True in the DeepSpeed Config file or set\n+ `zero3_save_16bit_model` to True in the DeepSpeed Plugin. \n+ **Note that this option requires consolidation of the weights on one GPU; it can be slow and memory demanding, so only use this feature when needed.**\n+ Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:\n+ ```python\n+ unwrapped_model = accelerator.unwrap_model(model)\n+\n+ # New Code #\n+ # Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if\n+ # `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or\n+ # `zero3_save_16bit_model` is True in DeepSpeed Plugin.\n+ # For Zero Stages 1 and 2, models are saved as usual in the output directory.\n+ # The model name saved is `pytorch_model.bin`\n+ unwrapped_model.save_pretrained(\n+ args.output_dir,\n+ is_main_process=accelerator.is_main_process,\n+ save_function=accelerator.save,\n+ state_dict=accelerator.get_state_dict(model),\n+ )\n+ ```\n+\n+ b. To get 32bit weights, first save the model using `model.save_checkpoint()`.\n+ Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:\n+ ```python\n+ success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)\n+ status_msg = \"checkpointing: PATH={}, ckpt_id={}\".format(PATH, ckpt_id)\n+ if success:\n+ logging.info(f\"Success {status_msg}\")\n+ else:\n+ logging.warning(f\"Failure {status_msg}\")\n+ ``` \n+ This will create ZeRO model and optimizer partitions along with a `zero_to_fp32.py` script in the checkpoint directory.\n+ One can use this script to do offline consolidation. \n+ It requires no configuration files or GPUs. Here is an example of its usage: \n+ ```bash\n+ $ cd /path/to/checkpoint_dir\n+ $ ./zero_to_fp32.py . 
pytorch_model.bin\n+ Processing zero checkpoint at global_step1\n+ Detected checkpoint of type zero stage 3, world_size: 2\n+ Saving fp32 state dict to pytorch_model.bin (total_numel=60506624)\n+ ```\n+ To get 32bit model for saving/inference, one can do the following:\n+ ```python\n+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint\n+\n+ unwrapped_model = accelerator.unwrap_model(model)\n+ fp32_model = load_state_dict_from_zero_checkpoint(unwrapped_model, checkpoint_dir)\n+ ```\n+ If only interested in state_dict, one can do the following:\n+ ```python\n+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint\n+\n+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)\n+ ```\n+ Note that all these functions require ~2x memory (general RAM) of the size of the final checkpoint.\n+\n+## ZeRO Inference\n+DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. \n+It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant.\n+With accelerate integration, one has to just prepare model and dataloader as shown below:\n+\n+```python\n+model, eval_dataloader = accelerator.prepare(model, eval_dataloader)\n+```\n+\n+## Few caveats to be aware of \n+\n+1. Current integration doesn’t support Pipeline Parallelism of DeepSpeed.\n+2. Current integration doesn’t support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM. \n+3. Current integration doesn’t support multiple models for a given `accelerator` object. \n+\n+\n+## Internals\n+\n+[[autodoc]] utils.DeepSpeedPlugin\n+\n+[[autodoc]] utils.DummyOptim\n+\n+[[autodoc]] utils.DummyScheduler\n+\n+[[autodoc]] utils.DeepSpeedEngineWrapper\n+\n+[[autodoc]] utils.DeepSpeedOptimizerWrapper\n+\n+[[autodoc]] utils.DeepSpeedSchedulerWrapper\n+\n+\n+## Main DeepSpeed Resources\n+\n+- [Project's github](https://github.com/microsoft/deepspeed)\n+- [Usage docs](https://www.deepspeed.ai/getting-started/)\n+- [API docs](https://deepspeed.readthedocs.io/en/latest/index.html)\n+- [Blog posts](https://www.microsoft.com/en-us/research/search/?q=deepspeed)\n+\n+Papers:\n+\n+- [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054)\n+- [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)\n+- [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)\n+\n+Finally, please, remember that, πŸ€— `Accelerate` only integrates DeepSpeed, therefore if you\n+have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).\n+\ndiff --git a/examples/by_feature/deepspeed_with_config_support.py b/examples/by_feature/deepspeed_with_config_support.py\nnew file mode 100755\nindex 000000000..fd8a8fa82\n--- /dev/null\n+++ b/examples/by_feature/deepspeed_with_config_support.py\n@@ -0,0 +1,736 @@\n+#!/usr/bin/env python\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\n+on a text file or a dataset without using HuggingFace Trainer.\n+\n+Here is the full list of checkpoints on the hub that can be fine-tuned by this script:\n+https://huggingface.co/models?filter=text-generation\n+\"\"\"\n+# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.\n+\n+import argparse\n+import json\n+import logging\n+import math\n+import os\n+import random\n+from itertools import chain\n+from pathlib import Path\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+import datasets\n+import transformers\n+from accelerate import Accelerator, DistributedType\n+from accelerate.logging import get_logger\n+from accelerate.utils import DummyOptim, DummyScheduler, set_seed\n+from datasets import load_dataset\n+from huggingface_hub import Repository\n+from tqdm.auto import tqdm\n+from transformers import (\n+ CONFIG_MAPPING,\n+ MODEL_MAPPING,\n+ AutoConfig,\n+ AutoModelForCausalLM,\n+ AutoTokenizer,\n+ SchedulerType,\n+ default_data_collator,\n+ get_scheduler,\n+)\n+from transformers.utils import get_full_repo_name\n+from transformers.utils.versions import require_version\n+\n+\n+logger = get_logger(__name__)\n+\n+require_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\")\n+\n+MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\n+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n+\n+\n+def parse_args():\n+ parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a causal language modeling task\")\n+ parser.add_argument(\n+ \"--dataset_name\",\n+ type=str,\n+ default=None,\n+ help=\"The name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--dataset_config_name\",\n+ type=str,\n+ default=None,\n+ help=\"The configuration name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_split_percentage\",\n+ default=5,\n+ help=\"The percentage of the train set used as validation set in case there's no validation split\",\n+ )\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=False,\n+ )\n+ parser.add_argument(\n+ \"--config_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained config name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--tokenizer_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained tokenizer name or path if not the same as 
model_name\",\n+ )\n+ parser.add_argument(\n+ \"--use_slow_tokenizer\",\n+ action=\"store_true\",\n+ help=\"If passed, will use a slow tokenizer (not backed by the πŸ€— Tokenizers library).\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_train_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the training dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_eval_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the evaluation dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--learning_rate\",\n+ type=float,\n+ default=5e-5,\n+ help=\"Initial learning rate (after the potential warmup period) to use.\",\n+ )\n+ parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n+ parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n+ parser.add_argument(\n+ \"--max_train_steps\",\n+ type=int,\n+ default=None,\n+ help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n+ )\n+ parser.add_argument(\n+ \"--gradient_accumulation_steps\",\n+ type=int,\n+ default=1,\n+ help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n+ )\n+ parser.add_argument(\n+ \"--lr_scheduler_type\",\n+ type=SchedulerType,\n+ default=\"linear\",\n+ help=\"The scheduler type to use.\",\n+ choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n+ )\n+ parser.add_argument(\n+ \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n+ )\n+ parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n+ parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n+ parser.add_argument(\n+ \"--model_type\",\n+ type=str,\n+ default=None,\n+ help=\"Model type to use if training from scratch.\",\n+ choices=MODEL_TYPES,\n+ )\n+ parser.add_argument(\n+ \"--block_size\",\n+ type=int,\n+ default=None,\n+ help=(\n+ \"Optional input sequence length after tokenization. The training dataset will be truncated in block of\"\n+ \" this size for training. 
Default to the model max input length for single sentence inputs (take into\"\n+ \" account special tokens).\"\n+ ),\n+ )\n+ parser.add_argument(\n+ \"--preprocessing_num_workers\",\n+ type=int,\n+ default=None,\n+ help=\"The number of processes to use for the preprocessing.\",\n+ )\n+ parser.add_argument(\n+ \"--overwrite_cache\", type=bool, default=False, help=\"Overwrite the cached training and evaluation sets\"\n+ )\n+ parser.add_argument(\n+ \"--no_keep_linebreaks\", action=\"store_true\", help=\"Do not keep line breaks when using TXT files.\"\n+ )\n+ parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\n+ parser.add_argument(\n+ \"--hub_model_id\", type=str, help=\"The name of the repository to keep in sync with the local `output_dir`.\"\n+ )\n+ parser.add_argument(\"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ # New Code #\n+ # Whether to load the best model at the end of training\n+ parser.add_argument(\n+ \"--load_best_model\",\n+ action=\"store_true\",\n+ help=\"Whether to load the best model at the end of training\",\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ action=\"store_true\",\n+ help=\"Whether to enable experiment trackers for logging.\",\n+ )\n+ parser.add_argument(\n+ \"--report_to\",\n+ type=str,\n+ default=\"all\",\n+ help=(\n+ 'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`,'\n+ ' `\"wandb\"` and `\"comet_ml\"`. 
Use `\"all\"` (default) to report to all integrations.'\n+ \"Only applicable when `--with_tracking` is passed.\"\n+ ),\n+ )\n+ args = parser.parse_args()\n+\n+ # Sanity checks\n+ if args.dataset_name is None and args.train_file is None and args.validation_file is None:\n+ raise ValueError(\"Need either a dataset name or a training/validation file.\")\n+ else:\n+ if args.train_file is not None:\n+ extension = args.train_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, json or txt file.\"\n+ if args.validation_file is not None:\n+ extension = args.validation_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, json or txt file.\"\n+\n+ if args.push_to_hub:\n+ assert args.output_dir is not None, \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n+\n+ return args\n+\n+\n+# New Code #\n+def checkpoint_model(checkpoint_folder, ckpt_id, model, epoch, last_global_step, **kwargs):\n+ \"\"\"Utility function for checkpointing model + optimizer dictionaries\n+ The main purpose for this is to be able to resume training from that instant again\n+ \"\"\"\n+ checkpoint_state_dict = {\n+ \"epoch\": epoch,\n+ \"last_global_step\": last_global_step,\n+ }\n+ # Add extra kwargs too\n+ checkpoint_state_dict.update(kwargs)\n+\n+ success = model.save_checkpoint(checkpoint_folder, ckpt_id, checkpoint_state_dict)\n+ status_msg = f\"checkpointing: checkpoint_folder={checkpoint_folder}, ckpt_id={ckpt_id}\"\n+ if success:\n+ logging.info(f\"Success {status_msg}\")\n+ else:\n+ logging.warning(f\"Failure {status_msg}\")\n+ return\n+\n+\n+# New Code #\n+def load_training_checkpoint(model, load_dir, tag=None, **kwargs):\n+ \"\"\"Utility function for checkpointing model + optimizer dictionaries\n+ The main purpose for this is to be able to resume training from that instant again\n+ \"\"\"\n+ _, checkpoint_state_dict = model.load_checkpoint(load_dir, tag=tag, **kwargs)\n+ epoch = checkpoint_state_dict[\"epoch\"]\n+ last_global_step = checkpoint_state_dict[\"last_global_step\"]\n+ del checkpoint_state_dict\n+ return (epoch, last_global_step)\n+\n+\n+# New Code #\n+def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):\n+ model.eval()\n+ losses = []\n+ for step, batch in enumerate(eval_dataloader):\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+\n+ loss = outputs.loss\n+ losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\n+\n+ losses = torch.cat(losses)\n+ losses = losses[: len(eval_dataset)]\n+ try:\n+ eval_loss = torch.mean(losses)\n+ perplexity = math.exp(eval_loss)\n+ except OverflowError:\n+ perplexity = float(\"inf\")\n+ return perplexity, eval_loss\n+\n+\n+def main():\n+ args = parse_args()\n+\n+ # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n+ # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers\n+ # in the environment\n+ accelerator = (\n+ Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator()\n+ )\n+ # Make one log on every process with the configuration for debugging.\n+ logging.basicConfig(\n+ format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n+ datefmt=\"%m/%d/%Y %H:%M:%S\",\n+ level=logging.INFO,\n+ )\n+ logger.info(accelerator.state, main_process_only=False)\n+ if accelerator.is_local_main_process:\n+ datasets.utils.logging.set_verbosity_warning()\n+ transformers.utils.logging.set_verbosity_info()\n+ else:\n+ datasets.utils.logging.set_verbosity_error()\n+ transformers.utils.logging.set_verbosity_error()\n+\n+ # If passed along, set the training seed now.\n+ if args.seed is not None:\n+ set_seed(args.seed)\n+\n+ # Handle the repository creation\n+ if accelerator.is_main_process:\n+ if args.push_to_hub:\n+ if args.hub_model_id is None:\n+ repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)\n+ else:\n+ repo_name = args.hub_model_id\n+ repo = Repository(args.output_dir, clone_from=repo_name)\n+\n+ with open(os.path.join(args.output_dir, \".gitignore\"), \"w+\") as gitignore:\n+ if \"step_*\" not in gitignore:\n+ gitignore.write(\"step_*\\n\")\n+ if \"epoch_*\" not in gitignore:\n+ gitignore.write(\"epoch_*\\n\")\n+ elif args.output_dir is not None:\n+ os.makedirs(args.output_dir, exist_ok=True)\n+ accelerator.wait_for_everyone()\n+\n+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n+ # (the dataset will be downloaded automatically from the datasets Hub).\n+ #\n+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n+ # 'text' is found. 
You can easily tweak this behavior (see below).\n+ #\n+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n+ # download the dataset.\n+ if args.dataset_name is not None:\n+ # Downloading and loading a dataset from the hub.\n+ raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)\n+ if \"validation\" not in raw_datasets.keys():\n+ raw_datasets[\"validation\"] = load_dataset(\n+ args.dataset_name,\n+ args.dataset_config_name,\n+ split=f\"train[:{args.validation_split_percentage}%]\",\n+ )\n+ raw_datasets[\"train\"] = load_dataset(\n+ args.dataset_name,\n+ args.dataset_config_name,\n+ split=f\"train[{args.validation_split_percentage}%:]\",\n+ )\n+ else:\n+ data_files = {}\n+ dataset_args = {}\n+ if args.train_file is not None:\n+ data_files[\"train\"] = args.train_file\n+ if args.validation_file is not None:\n+ data_files[\"validation\"] = args.validation_file\n+ extension = args.train_file.split(\".\")[-1]\n+ if extension == \"txt\":\n+ extension = \"text\"\n+ dataset_args[\"keep_linebreaks\"] = not args.no_keep_linebreaks\n+ raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)\n+ # If no validation data is there, validation_split_percentage will be used to divide the dataset.\n+ if \"validation\" not in raw_datasets.keys():\n+ raw_datasets[\"validation\"] = load_dataset(\n+ extension,\n+ data_files=data_files,\n+ split=f\"train[:{args.validation_split_percentage}%]\",\n+ **dataset_args,\n+ )\n+ raw_datasets[\"train\"] = load_dataset(\n+ extension,\n+ data_files=data_files,\n+ split=f\"train[{args.validation_split_percentage}%:]\",\n+ **dataset_args,\n+ )\n+\n+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n+ # https://huggingface.co/docs/datasets/loading_datasets.html.\n+\n+ # Load pretrained model and tokenizer\n+ #\n+ # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n+ # download model & vocab.\n+ if args.config_name:\n+ config = AutoConfig.from_pretrained(args.config_name)\n+ elif args.model_name_or_path:\n+ config = AutoConfig.from_pretrained(args.model_name_or_path)\n+ else:\n+ config = CONFIG_MAPPING[args.model_type]()\n+ logger.warning(\"You are instantiating a new config instance from scratch.\")\n+\n+ if args.tokenizer_name:\n+ tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)\n+ elif args.model_name_or_path:\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)\n+ else:\n+ raise ValueError(\n+ \"You are instantiating a new tokenizer from scratch. 
This is not supported by this script.\"\n+ \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n+ )\n+\n+ if args.model_name_or_path:\n+ model = AutoModelForCausalLM.from_pretrained(\n+ args.model_name_or_path,\n+ from_tf=bool(\".ckpt\" in args.model_name_or_path),\n+ config=config,\n+ )\n+ else:\n+ logger.info(\"Training new model from scratch\")\n+ model = AutoModelForCausalLM.from_config(config)\n+\n+ model.resize_token_embeddings(len(tokenizer))\n+\n+ # Preprocessing the datasets.\n+ # First we tokenize all the texts.\n+ column_names = raw_datasets[\"train\"].column_names\n+ text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n+\n+ def tokenize_function(examples):\n+ return tokenizer(examples[text_column_name])\n+\n+ with accelerator.main_process_first():\n+ tokenized_datasets = raw_datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ num_proc=args.preprocessing_num_workers,\n+ remove_columns=column_names,\n+ load_from_cache_file=not args.overwrite_cache,\n+ desc=\"Running tokenizer on dataset\",\n+ )\n+\n+ if args.block_size is None:\n+ block_size = tokenizer.model_max_length\n+ if block_size > 1024:\n+ logger.warning(\n+ f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n+ \"Picking 1024 instead. You can change that default value by passing --block_size xxx.\"\n+ )\n+ block_size = 1024\n+ else:\n+ if args.block_size > tokenizer.model_max_length:\n+ logger.warning(\n+ f\"The block_size passed ({args.block_size}) is larger than the maximum length for the model\"\n+ f\"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.\"\n+ )\n+ block_size = min(args.block_size, tokenizer.model_max_length)\n+\n+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.\n+ def group_texts(examples):\n+ # Concatenate all texts.\n+ concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\n+ total_length = len(concatenated_examples[list(examples.keys())[0]])\n+ # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can\n+ # customize this part to your needs.\n+ if total_length >= block_size:\n+ total_length = (total_length // block_size) * block_size\n+ # Split by chunks of max_len.\n+ result = {\n+ k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\n+ for k, t in concatenated_examples.items()\n+ }\n+ result[\"labels\"] = result[\"input_ids\"].copy()\n+ return result\n+\n+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder\n+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower\n+ # to preprocess.\n+ #\n+ # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information:\n+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map\n+\n+ with accelerator.main_process_first():\n+ lm_datasets = tokenized_datasets.map(\n+ group_texts,\n+ batched=True,\n+ num_proc=args.preprocessing_num_workers,\n+ load_from_cache_file=not args.overwrite_cache,\n+ desc=f\"Grouping texts in chunks of {block_size}\",\n+ )\n+\n+ train_dataset = lm_datasets[\"train\"]\n+ eval_dataset = lm_datasets[\"validation\"]\n+\n+ # Log a few random samples from the training set:\n+ for index in random.sample(range(len(train_dataset)), 3):\n+ logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n+\n+ # DataLoaders creation:\n+ train_dataloader = DataLoader(\n+ train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size\n+ )\n+\n+ # Optimizer\n+ # Split weights in two groups, one with weight decay and the other not.\n+ no_decay = [\"bias\", \"LayerNorm.weight\"]\n+ optimizer_grouped_parameters = [\n+ {\n+ \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n+ \"weight_decay\": args.weight_decay,\n+ },\n+ {\n+ \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n+ \"weight_decay\": 0.0,\n+ },\n+ ]\n+ # New Code #\n+ # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer\n+ optimizer_cls = (\n+ torch.optim.AdamW\n+ if accelerator.state.deepspeed_plugin is None\n+ or \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ else DummyOptim\n+ )\n+ optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)\n+\n+ # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ model.tie_weights()\n+\n+ # Scheduler and math around the number of training steps.\n+\n+ # New Code\n+ # Get gradient accumulation steps from deepspeed config if available\n+ if accelerator.state.deepspeed_plugin is not None:\n+ args.gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[\n+ \"gradient_accumulation_steps\"\n+ ]\n+\n+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n+ if args.max_train_steps is None:\n+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n+ else:\n+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n+\n+ # New Code #\n+ # Creates Dummy Scheduler if `scheduler` was spcified in the config file else creates `args.lr_scheduler_type` Scheduler\n+ if (\n+ accelerator.state.deepspeed_plugin is None\n+ or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n+ ):\n+ lr_scheduler = get_scheduler(\n+ name=args.lr_scheduler_type,\n+ optimizer=optimizer,\n+ num_warmup_steps=args.num_warmup_steps,\n+ num_training_steps=args.max_train_steps,\n+ )\n+ else:\n+ lr_scheduler = DummyScheduler(\n+ optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps\n+ )\n+\n+ # Prepare everything with our `accelerator`.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # We 
need to recalculate our total training steps as the size of the training dataloader may have changed.\n+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n+\n+ # Figure out how many steps we should save the Accelerator states\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ checkpointing_steps = args.checkpointing_steps\n+ if args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ checkpointing_steps = None\n+\n+ # We need to initialize the trackers we use, and also store our configuration.\n+ # We initialize the trackers only on main process because `accelerator.log`\n+ # only logs on main process and we don't want empty logs/runs on other processes.\n+ if args.with_tracking:\n+ if accelerator.is_main_process:\n+ experiment_config = vars(args)\n+ # TensorBoard cannot log Enums, need the raw value\n+ experiment_config[\"lr_scheduler_type\"] = experiment_config[\"lr_scheduler_type\"].value\n+ accelerator.init_trackers(\"clm_no_trainer\", experiment_config)\n+\n+ # Train!\n+ total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n+\n+ logger.info(\"***** Running training *****\")\n+ logger.info(f\" Num examples = {len(train_dataset)}\")\n+ logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n+ logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n+ logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\n+ logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n+ logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n+ # Only show the progress bar once on each machine.\n+ progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n+ completed_steps = 0\n+ starting_epoch = 0\n+ best_metric = None\n+ best_metric_checkpoint = None\n+\n+ # Potentially load in the weights and states from a previous save\n+ if args.resume_from_checkpoint:\n+ # New Code #\n+ # Loads the DeepSpeed checkpoint from the specified path\n+ _, last_global_step = load_training_checkpoint(\n+ model,\n+ args.resume_from_checkpoint,\n+ **{\"load_optimizer_states\": True, \"load_lr_scheduler_states\": True},\n+ )\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ resume_step = last_global_step\n+ starting_epoch = resume_step // len(train_dataloader)\n+ resume_step -= starting_epoch * len(train_dataloader)\n+\n+ for epoch in range(starting_epoch, args.num_train_epochs):\n+ model.train()\n+ if args.with_tracking:\n+ total_loss = 0\n+ for step, batch in enumerate(train_dataloader):\n+ # We need to skip steps until we reach the resumed step\n+ if args.resume_from_checkpoint and epoch == starting_epoch:\n+ if resume_step is not None and step < resume_step:\n+ completed_steps += 1\n+ continue\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ # We keep track of the loss at each epoch\n+ if args.with_tracking:\n+ total_loss += loss.detach().float()\n+ loss = loss / args.gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+ progress_bar.update(1)\n+ completed_steps += 1\n+\n+ if isinstance(checkpointing_steps, int):\n+ if 
completed_steps % checkpointing_steps == 0:\n+ output_dir = f\"step_{completed_steps }\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+ if completed_steps >= args.max_train_steps:\n+ break\n+\n+ perplexity, eval_loss = evaluate(args, model, eval_dataloader, accelerator, eval_dataset)\n+ logger.info(f\"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}\")\n+\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"perplexity\": perplexity,\n+ \"eval_loss\": eval_loss,\n+ \"train_loss\": total_loss.item() / len(train_dataloader),\n+ \"epoch\": epoch,\n+ \"step\": completed_steps,\n+ },\n+ step=completed_steps,\n+ )\n+\n+ # New Code #\n+ # Save the DeepSpeed checkpoint to the specified path\n+ checkpoint_model(args.output_dir, epoch, model, epoch, completed_steps)\n+\n+ # New Code #\n+ # Tracks the best checkpoint and best metric\n+ if best_metric is None or best_metric > perplexity:\n+ best_metric = perplexity\n+ best_metric_checkpoint = os.path.join(args.output_dir, str(epoch))\n+ accelerator.print(f\"New best metric: {best_metric} at epoch {epoch}\")\n+ accelerator.print(f\"best_metric_checkpoint: {best_metric_checkpoint}\")\n+\n+ # New Code #\n+ # Loads the best checkpoint after the training is finished\n+ if args.load_best_model:\n+ _, last_global_step = load_training_checkpoint(\n+ model,\n+ \"/\".join(best_metric_checkpoint.split(\"/\")[:-1]),\n+ tag=best_metric_checkpoint.split(\"/\")[-1],\n+ **{\"load_optimizer_states\": True, \"load_lr_scheduler_states\": True},\n+ )\n+\n+ # New Code #\n+ # Evaluates using the best checkpoint\n+ perplexity, eval_loss = evaluate(args, model, eval_dataloader, accelerator, eval_dataset)\n+ logger.info(f\"Best model metrics: perplexity: {perplexity} eval_loss: {eval_loss}\")\n+ if perplexity != best_metric:\n+ raise AssertionError(\n+ f\"Best metric {best_metric} does not match the metric {perplexity} of the loaded best model.\"\n+ )\n+\n+ if args.output_dir is not None:\n+ accelerator.wait_for_everyone()\n+ unwrapped_model = accelerator.unwrap_model(model)\n+\n+ # New Code #\n+ # Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if\n+ # `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or\n+ # `zero3_save_16bit_model` is True in DeepSpeed Plugin.\n+ # For Zero Stages 1 and 2, models are saved as usual in the output directory.\n+ # The model name saved is `pytorch_model.bin`\n+ unwrapped_model.save_pretrained(\n+ args.output_dir,\n+ is_main_process=accelerator.is_main_process,\n+ save_function=accelerator.save,\n+ state_dict=accelerator.get_state_dict(model),\n+ )\n+ if accelerator.is_main_process:\n+ tokenizer.save_pretrained(args.output_dir)\n+ if args.push_to_hub:\n+ repo.push_to_hub(commit_message=\"End of training\", auto_lfs_prune=True)\n+\n+ with open(os.path.join(args.output_dir, \"all_results.json\"), \"w\") as f:\n+ json.dump({\"perplexity\": perplexity, \"eval_loss\": eval_loss.item()}, f)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/examples/deepspeed_config_templates/zero_stage1_config.json b/examples/deepspeed_config_templates/zero_stage1_config.json\nnew file mode 100644\nindex 000000000..674420e57\n--- /dev/null\n+++ b/examples/deepspeed_config_templates/zero_stage1_config.json\n@@ -0,0 +1,43 @@\n+{\n+ \"fp16\": {\n+ \"enabled\": true,\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 
1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\",\n+ \"torch_adam\": true,\n+ \"adam_w_mode\": true\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 1,\n+ \"allgather_partitions\": true,\n+ \"allgather_bucket_size\": 2e8,\n+ \"overlap_comm\": true,\n+ \"reduce_scatter\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"contiguous_gradients\": true\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n\\ No newline at end of file\ndiff --git a/examples/deepspeed_config_templates/zero_stage2_config.json b/examples/deepspeed_config_templates/zero_stage2_config.json\nnew file mode 100644\nindex 000000000..9597f8485\n--- /dev/null\n+++ b/examples/deepspeed_config_templates/zero_stage2_config.json\n@@ -0,0 +1,43 @@\n+{\n+ \"fp16\": {\n+ \"enabled\": true,\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\",\n+ \"torch_adam\": true,\n+ \"adam_w_mode\": true\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 2,\n+ \"allgather_partitions\": true,\n+ \"allgather_bucket_size\": 2e8,\n+ \"overlap_comm\": true,\n+ \"reduce_scatter\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"contiguous_gradients\": true\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n\\ No newline at end of file\ndiff --git a/examples/deepspeed_config_templates/zero_stage2_offload_config.json b/examples/deepspeed_config_templates/zero_stage2_offload_config.json\nnew file mode 100644\nindex 000000000..98baedef3\n--- /dev/null\n+++ b/examples/deepspeed_config_templates/zero_stage2_offload_config.json\n@@ -0,0 +1,47 @@\n+{\n+ \"fp16\": {\n+ \"enabled\": true,\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\",\n+ \"torch_adam\": true,\n+ \"adam_w_mode\": true\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 2,\n+ \"offload_optimizer\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"allgather_partitions\": true,\n+ \"allgather_bucket_size\": 2e8,\n+ \"overlap_comm\": true,\n+ \"reduce_scatter\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"contiguous_gradients\": true\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ 
\"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n\\ No newline at end of file\ndiff --git a/examples/deepspeed_config_templates/zero_stage3_config.json b/examples/deepspeed_config_templates/zero_stage3_config.json\nnew file mode 100644\nindex 000000000..2ec6fff41\n--- /dev/null\n+++ b/examples/deepspeed_config_templates/zero_stage3_config.json\n@@ -0,0 +1,44 @@\n+{\n+ \"fp16\": {\n+ \"enabled\": true,\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\"\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"overlap_comm\": true,\n+ \"contiguous_gradients\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"stage3_prefetch_bucket_size\": \"auto\",\n+ \"stage3_param_persistence_threshold\": \"auto\",\n+ \"sub_group_size\": 1e9,\n+ \"stage3_max_live_parameters\": 1e9,\n+ \"stage3_max_reuse_distance\": 1e9,\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\"\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n\\ No newline at end of file\ndiff --git a/examples/deepspeed_config_templates/zero_stage3_offload_config.json b/examples/deepspeed_config_templates/zero_stage3_offload_config.json\nnew file mode 100644\nindex 000000000..edae8e628\n--- /dev/null\n+++ b/examples/deepspeed_config_templates/zero_stage3_offload_config.json\n@@ -0,0 +1,52 @@\n+{\n+ \"fp16\": {\n+ \"enabled\": true,\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\"\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"offload_optimizer\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"offload_param\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"overlap_comm\": true,\n+ \"contiguous_gradients\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"stage3_prefetch_bucket_size\": \"auto\",\n+ \"stage3_param_persistence_threshold\": \"auto\",\n+ \"sub_group_size\": 1e9,\n+ \"stage3_max_live_parameters\": 1e9,\n+ \"stage3_max_reuse_distance\": 1e9,\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\"\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n\\ No newline at end of file\ndiff --git a/setup.py b/setup.py\nindex a28b9254c..45c4ae912 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,9 @@\n \"evaluate\",\n \"transformers\",\n \"scipy\",\n- \"sklearn\"\n+ \"sklearn\",\n+ \"parameterized\",\n+ \"deepspeed\",\n 
]\n \n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 6dc1f847c..9205f0854 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -13,10 +13,12 @@\n # limitations under the License.\n \n import gc\n+import math\n import os\n import sys\n import warnings\n from contextlib import contextmanager\n+from copy import deepcopy\n from typing import List, Optional, Union\n \n import torch\n@@ -39,12 +41,14 @@\n LoggerType,\n PrecisionType,\n RNGType,\n+ compare_versions,\n convert_outputs_to_fp32,\n extract_model_from_parallel,\n gather,\n get_pretty_name,\n is_deepspeed_available,\n is_torch_version,\n+ is_transformers_available,\n pad_across_processes,\n reduce,\n save,\n@@ -55,7 +59,13 @@\n if is_deepspeed_available():\n import deepspeed\n \n- from .utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper\n+ from .utils import (\n+ DeepSpeedEngineWrapper,\n+ DeepSpeedOptimizerWrapper,\n+ DeepSpeedSchedulerWrapper,\n+ DummyOptim,\n+ DummyScheduler,\n+ )\n \n logger = get_logger(__name__)\n \n@@ -163,6 +173,30 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n+ if deepspeed_plugin:\n+ if not is_deepspeed_available():\n+ raise ImportError(\"DeepSpeed is not installed => run `pip install deepspeed` or build it from source.\")\n+ if compare_versions(\"deepspeed\", \"<\", \"0.6.5\"):\n+ raise ImportError(\"DeepSpeed version must be >= 0.6.5. Please update DeepSpeed.\")\n+ if os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\" or deepspeed_plugin.zero3_init_flag:\n+ if not is_transformers_available():\n+ raise Exception(\n+ \"When `zero3_init_flag` is set, it requires Transformers to be installed. 
\"\n+ \"Please run `pip install transformers`.\"\n+ )\n+ from transformers.deepspeed import HfDeepSpeedConfig\n+\n+ ds_config = deepcopy(deepspeed_plugin.deepspeed_config)\n+ del ds_config[\"train_batch_size\"]\n+ ds_config.update({\"train_micro_batch_size_per_gpu\": 1, \"gradient_accumulation_steps\": 1})\n+ mixed_precision = (\n+ os.environ.get(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n+ )\n+ if mixed_precision == \"fp16\":\n+ ds_config.update({\"fp16\": {\"enabled\": True}})\n+ elif mixed_precision == \"bf16\":\n+ ds_config.update({\"bf16\": {\"enabled\": True}})\n+ self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa\n \n if fsdp_plugin is None: # init from env variables\n fsdp_plugin = FullyShardedDataParallelPlugin() if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" else None\n@@ -497,9 +531,15 @@ def prepare_model(self, model):\n def _prepare_deepspeed(self, *args):\n \n deepspeed_plugin = self.state.deepspeed_plugin\n- self.deepspeed_config = deepspeed_plugin.deepspeed_config\n+\n+ result = [\n+ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\n+ for obj in args\n+ ]\n \n batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ if self.split_batches:\n+ batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\n if len(batch_sizes) == 0:\n raise ValueError(\n \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n@@ -508,73 +548,141 @@ def _prepare_deepspeed(self, *args):\n batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n if len(batch_sizes) > 1:\n logger.info(\n- f\"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \\\n- {deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n+ \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\n+ f\"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n )\n \n- self.deepspeed_config[\"train_batch_size\"] = (\n- batch_size_per_device * deepspeed_plugin.gradient_accumulation_steps * self.num_processes\n- )\n-\n- result = [\n- self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\n- for obj in args\n- ]\n+ config_kwargs = {\n+ \"train_micro_batch_size_per_gpu\": batch_size_per_device,\n+ \"train_batch_size\": batch_size_per_device\n+ * deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]\n+ * self.num_processes,\n+ \"gradient_clipping\": 1.0,\n+ \"zero_optimization.stage3_gather_16bit_weights_on_model_save\": False,\n+ }\n \n model = None\n optimizer = None\n+ scheduler = None\n for obj in result:\n if isinstance(obj, torch.nn.Module):\n model = obj\n- elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)):\n optimizer = obj\n+ elif (isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\n+ type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\n+ ):\n+ scheduler = obj\n+\n+ if optimizer is not None:\n+ if \"optimizer\" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)):\n+ raise ValueError(\n+ \"You cannot specify an optimizer in the config file and in the code at the same time. 
\"\n+ \"Please remove the optimizer from the config file or \"\n+ \"create `accelerate.utils.DummyOptim` in the code.\"\n+ )\n+ elif \"optimizer\" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)):\n+ raise ValueError(\n+ \"You cannot create a `DummyOptim` without specifying an optimizer in the config file.\"\n+ )\n+\n+ if isinstance(optimizer, (torch.optim.Optimizer)):\n+ deepspeed_plugin.deepspeed_config[\"zero_allow_untested_optimizer\"] = True\n+\n+ if scheduler is not None:\n+ if \"scheduler\" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\n+ \"You cannot specify a scheduler in the config file and in the code at the same time. \"\n+ \"Please remove the scheduler from the config file or \"\n+ \"create `accelerate.utils.DummyScheduler` in the code.\"\n+ )\n+ elif \"scheduler\" not in deepspeed_plugin.deepspeed_config and isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\n+ \"You cannot create a `DummyScheduler` without specifying a scheduler in the config file.\"\n+ )\n \n- if deepspeed_plugin.auto_opt_mapping:\n- is_adam = isinstance(optimizer, torch.optim.Adam)\n- is_adamw = isinstance(optimizer, torch.optim.AdamW)\n- if (is_adam or is_adamw) and deepspeed_plugin.offload_optimizer_device == \"cpu\":\n- defaults = optimizer.defaults\n- params = []\n- for group in optimizer.param_groups:\n- params.extend(group[\"params\"])\n-\n- optimizer = deepspeed.ops.adam.DeepSpeedCPUAdam(\n- params,\n- lr=defaults[\"lr\"],\n- bias_correction=True,\n- betas=defaults[\"betas\"],\n- eps=defaults[\"eps\"],\n- weight_decay=defaults[\"weight_decay\"],\n- amsgrad=defaults[\"amsgrad\"],\n- adamw_mode=is_adamw,\n+ if optimizer is not None and scheduler is not None:\n+ if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\n+ \"You can only specify `accelerate.utils.DummyScheduler` in the code when using \"\n+ \"`accelerate.utils.DummyOptim`.\"\n )\n \n- # useful when only eval_dataloader is given into `accelerator.prepare()`\n if model is not None:\n- engine = DeepSpeedEngineWrapper(\n- args=None,\n- model=model,\n- optimizer=optimizer,\n- config_params=self.deepspeed_config,\n- dist_init_required=False,\n- )\n+ if hasattr(model, \"config\") and hasattr(model.config, \"hidden_size\"):\n+ hidden_size = model.config.hidden_size\n+ config_kwargs.update(\n+ {\n+ \"zero_optimization.reduce_bucket_size\": hidden_size * hidden_size,\n+ \"zero_optimization.stage3_prefetch_bucket_size\": 0.9 * hidden_size * hidden_size,\n+ \"zero_optimization.stage3_param_persistence_threshold\": 10 * hidden_size,\n+ }\n+ )\n+\n+ if isinstance(optimizer, (DummyOptim)):\n+ config_kwargs.update(\n+ {\"optimizer.params.lr\": optimizer.lr, \"optimizer.params.weight_decay\": optimizer.weight_decay}\n+ )\n+ if isinstance(scheduler, (DummyScheduler)):\n+ config_kwargs.update(\n+ {\n+ \"scheduler.params.warmup_min_lr\": 0,\n+ \"scheduler.params.warmup_max_lr\": scheduler.optimizer.lr,\n+ \"scheduler.params.warmup_num_steps\": scheduler.warmup_num_steps,\n+ }\n+ )\n+ if scheduler.total_num_steps is not None:\n+ config_kwargs[\"scheduler.params.total_num_steps\"] = (\n+ math.ceil(scheduler.total_num_steps / self.num_processes)\n+ if not self.split_batches\n+ else scheduler.total_num_steps\n+ )\n+ deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs)\n+ self.deepspeed_config = deepspeed_plugin.deepspeed_config\n+ kwargs = dict(model=model, 
config_params=self.deepspeed_config)\n+ if optimizer is not None:\n+ if isinstance(optimizer, (DummyOptim)):\n+ kwargs[\"model_parameters\"] = optimizer.params\n+ else:\n+ kwargs[\"optimizer\"] = optimizer\n+ if scheduler is not None:\n+ if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:\n+ kwargs[\"lr_scheduler\"] = scheduler\n+\n+ engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)\n+ if optimizer is not None:\n+ optimizer = DeepSpeedOptimizerWrapper(optimizer)\n+ if scheduler is not None:\n+ if lr_scheduler is None:\n+ scheduler = AcceleratedScheduler(\n+ scheduler,\n+ optimizer,\n+ step_with_optimizer=self.step_scheduler_with_optimizer,\n+ split_batches=self.split_batches,\n+ )\n+ else:\n+ scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer)\n+\n for i in range(len(result)):\n if isinstance(result[i], torch.nn.Module):\n result[i] = engine\n- elif isinstance(result[i], torch.optim.Optimizer):\n- result[i] = DeepSpeedOptimizerWrapper(engine.optimizer, engine)\n- self.deepspeed_engine = engine # pointing for deepspeed_engine.backward()\n+ elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)):\n+ result[i] = optimizer\n+ elif (isinstance(result[i], (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\n+ type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\n+ ):\n+ result[i] = scheduler\n+ # pointing for deepspeed_engine_wrapped.backward()\n+ self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine)\n self._models.append(engine)\n- self._optimizers.append(engine.optimizer)\n- assert (\n- len(self._models) == 1\n- ), \"You can't use same `Accelerator()` instance with 2 models when using DeepSpeed\"\n-\n- if self.distributed_type == DistributedType.DEEPSPEED:\n- assert hasattr(\n- self, \"deepspeed_engine\"\n- ), \"You need to pass the model along the optimizer when using Deepspeed.\"\n-\n+ if optimizer is not None:\n+ self._optimizers.append(optimizer)\n+ if scheduler is not None:\n+ self._schedulers.append(scheduler)\n+ if len(self._models) > 1:\n+ raise AssertionError(\n+ \"You can't use same `Accelerator()` instance with multiple models when using DeepSpeed\"\n+ )\n return tuple(result)\n \n def prepare_data_loader(self, data_loader):\n@@ -612,7 +720,7 @@ def backward(self, loss, **kwargs):\n Use `accelerator.backward(loss)` in lieu of `loss.backward()`.\n \"\"\"\n if self.distributed_type == DistributedType.DEEPSPEED:\n- self.deepspeed_engine.backward(loss, **kwargs)\n+ self.deepspeed_engine_wrapped.backward(loss, **kwargs)\n elif self.scaler is not None:\n self.scaler.scale(loss).backward(**kwargs)\n else:\n@@ -648,6 +756,9 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n if parameters == [p for p in model.parameters()]:\n model.clip_grad_norm_(max_norm, norm_type)\n return\n+ elif self.distributed_type == DistributedType.DEEPSPEED:\n+ # `accelerator.backward(loss)` is doing that automatically. Therefore, it's implementation is not needed\n+ return\n self.unscale_gradients()\n torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\n \n@@ -655,6 +766,8 @@ def clip_grad_value_(self, parameters, clip_value):\n \"\"\"\n Should be used in place of `torch.nn.utils.clip_grad_value_`.\n \"\"\"\n+ if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]:\n+ raise Exception(\"DeepSpeed and FSDP do not support `clip_grad_value_`. 
Use `clip_grad_norm_` instead.\")\n self.unscale_gradients()\n torch.nn.utils.clip_grad_value_(parameters, clip_value)\n \n@@ -837,7 +950,7 @@ def free_memory(self):\n self._schedulers = []\n self._optimizers = []\n self._models = []\n- self.deepspeed_engine = None\n+ self.deepspeed_engine_wrapped = None\n gc.collect()\n torch.cuda.empty_cache()\n \n@@ -875,12 +988,19 @@ def _get_devices(self, *args):\n \n def get_state_dict(self, model):\n is_zero_3 = False\n- if is_deepspeed_available():\n- if isinstance(model, DeepSpeedEngineWrapper) and self.distributed_type == DistributedType.DEEPSPEED:\n- is_zero_3 = self.state.deepspeed_plugin.zero_stage == 3\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ is_zero_3 = self.deepspeed_config[\"zero_optimization\"][\"stage\"] == 3\n \n if is_zero_3:\n- state_dict = model._zero3_consolidated_16bit_state_dict()\n+ if model.zero_gather_16bit_weights_on_model_save():\n+ state_dict = model._zero3_consolidated_16bit_state_dict()\n+ else:\n+ raise ValueError(\n+ \"Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. \"\n+ \"To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or \"\n+ \"set `zero3_save_16bit_model` to True when using `accelerate config`. \"\n+ \"To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights.\"\n+ )\n else:\n model = self.unwrap_model(model)\n state_dict = model.state_dict()\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 01a55e646..907122036 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -14,7 +14,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available\n+from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_transformers_available\n from .config_args import ClusterConfig\n from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n \n@@ -77,24 +77,72 @@ def get_cluster_input():\n ), \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source\"\n \n if distributed_type == DistributedType.DEEPSPEED:\n- deepspeed_config[\"zero_stage\"] = _ask_field(\n- \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\n- lambda x: int(x),\n- default=2,\n+ use_deepspeed_config = _ask_field(\n+ \"Do you want to specify a json file to a DeepSpeed config? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n )\n-\n- if deepspeed_config[\"zero_stage\"] >= 2:\n- deepspeed_config[\"offload_optimizer_device\"] = _ask_field(\n- \"Where to offload optimizer states? [NONE/cpu/nvme]: \",\n+ if use_deepspeed_config:\n+ deepspeed_config[\"deepspeed_config_file\"] = _ask_field(\n+ \"Please enter the path to the json DeepSpeed config file: \",\n lambda x: str(x),\n default=\"none\",\n )\n+ else:\n+ deepspeed_config[\"zero_stage\"] = _ask_field(\n+ \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\n+ lambda x: int(x),\n+ default=2,\n+ )\n \n- deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\n- \"How many gradient accumulation steps you're passing in your script? 
[1]: \",\n- lambda x: int(x),\n- default=1,\n+ if deepspeed_config[\"zero_stage\"] >= 2:\n+ deepspeed_config[\"offload_optimizer_device\"] = _ask_field(\n+ \"Where to offload optimizer states? [none/cpu/nvme]: \",\n+ lambda x: str(x),\n+ default=\"none\",\n+ )\n+ deepspeed_config[\"offload_param_device\"] = _ask_field(\n+ \"Where to offload parameters? [none/cpu/nvme]: \",\n+ lambda x: str(x),\n+ default=\"none\",\n+ )\n+ deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\n+ \"How many gradient accumulation steps you're passing in your script? [1]: \",\n+ lambda x: int(x),\n+ default=1,\n+ )\n+ use_gradient_clipping = _ask_field(\n+ \"Do you want to use gradient clipping? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_gradient_clipping:\n+ deepspeed_config[\"gradient_clipping\"] = _ask_field(\n+ \"What is the gradient clipping value? [1.0]: \",\n+ lambda x: float(x),\n+ default=1.0,\n+ )\n+ if deepspeed_config[\"zero_stage\"] == 3:\n+ deepspeed_config[\"zero3_save_16bit_model\"] = _ask_field(\n+ \"Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ deepspeed_config[\"zero3_init_flag\"] = _ask_field(\n+ \"Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n )\n+ if deepspeed_config[\"zero3_init_flag\"]:\n+ if not is_transformers_available():\n+ raise Exception(\n+ \"When `zero3_init_flag` is set, it requires Transformers to be installed. \"\n+ \"Please run `pip3 install transformers`.\"\n+ )\n \n fsdp_config = {}\n if distributed_type in [DistributedType.MULTI_GPU]:\n@@ -155,11 +203,14 @@ def get_cluster_input():\n num_processes = 1\n \n if distributed_type != DistributedType.TPU:\n- mixed_precision = _ask_field(\n- \"Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: \",\n- lambda x: str(x).lower(),\n- default=\"no\",\n- )\n+ if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:\n+ mixed_precision = \"no\"\n+ else:\n+ mixed_precision = _ask_field(\n+ \"Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: \",\n+ lambda x: str(x).lower(),\n+ default=\"no\",\n+ )\n else:\n mixed_precision = \"no\"\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 22c8e621c..93b04d6ab 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -31,6 +31,7 @@\n DistributedType,\n PrecisionType,\n PrepareForLaunch,\n+ is_deepspeed_available,\n is_sagemaker_available,\n )\n from accelerate.utils.versions import is_torch_version\n@@ -57,6 +58,56 @@ def launch_command_parser(subparsers=None):\n action=\"store_true\",\n help=\"Whether to use deepspeed.\",\n )\n+ parser.add_argument(\n+ \"--deepspeed_config_file\",\n+ default=None,\n+ type=str,\n+ help=\"DeepSpeed config file.\",\n+ )\n+ parser.add_argument(\n+ \"--zero_stage\",\n+ default=None,\n+ type=int,\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--offload_optimizer_device\",\n+ default=None,\n+ type=str,\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--offload_param_device\",\n+ default=None,\n+ type=str,\n+ help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--gradient_accumulation_steps\",\n+ default=None,\n+ type=int,\n+ help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--gradient_clipping\",\n+ default=None,\n+ type=float,\n+ help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--zero3_init_flag\",\n+ default=None,\n+ type=str,\n+ help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\n+ \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n+ )\n+ parser.add_argument(\n+ \"--zero3_save_16bit_model\",\n+ default=None,\n+ type=str,\n+ help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. 
\"\n+ \"Only applicable with DeepSpeed ZeRO Stage-3.\",\n+ )\n parser.add_argument(\n \"--use_fsdp\",\n default=False,\n@@ -158,24 +209,6 @@ def launch_command_parser(subparsers=None):\n \"script.\"\n ),\n )\n- parser.add_argument(\n- \"--zero_stage\",\n- default=None,\n- type=int,\n- help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n- )\n- parser.add_argument(\n- \"--offload_optimizer_device\",\n- default=None,\n- type=str,\n- help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n- )\n- parser.add_argument(\n- \"--gradient_accumulation_steps\",\n- default=None,\n- type=int,\n- help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).\",\n- )\n \n # Other arguments of the training scripts\n parser.add_argument(\"training_script_args\", nargs=argparse.REMAINDER, help=\"Arguments of the training script.\")\n@@ -279,6 +312,8 @@ def multi_gpu_launcher(args):\n \n \n def deepspeed_launcher(args):\n+ if not is_deepspeed_available():\n+ raise ImportError(\"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\")\n cmd = [\"deepspeed\", \"--no_local_rank\"]\n if args.num_machines > 1:\n cmd.extend(\n@@ -323,7 +358,12 @@ def deepspeed_launcher(args):\n current_env[\"USE_DEEPSPEED\"] = \"true\"\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n+ current_env[\"GRADIENT_CLIPPING\"] = str(args.gradient_clipping)\n current_env[\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n+ current_env[\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\"] = str(args.offload_param_device).lower()\n+ current_env[\"DEEPSPEED_ZERO3_INIT\"] = str(args.zero3_init_flag).lower()\n+ current_env[\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\"] = str(args.zero3_save_16bit_model).lower()\n+ current_env[\"DEEPSPEED_CONFIG_FILE\"] = str(args.deepspeed_config_file).lower()\n \n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex fbda09754..6e09fdae4 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -108,10 +108,18 @@ def __init__(\n mixed_precision = (\n parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n )\n- if mixed_precision == \"fp16\":\n+ if (\n+ mixed_precision == \"fp16\"\n+ and \"fp16\" not in deepspeed_plugin.deepspeed_config\n+ and \"bf16\" not in deepspeed_plugin.deepspeed_config\n+ ):\n deepspeed_plugin.deepspeed_config.update({\"fp16\": {\"enabled\": True}})\n- elif mixed_precision == \"bf16\":\n- deepspeed_plugin.deepspeed_config.update({\"bfloat16\": {\"enabled\": True}})\n+ elif (\n+ mixed_precision == \"bf16\"\n+ and \"fp16\" not in deepspeed_plugin.deepspeed_config\n+ and \"bf16\" not in deepspeed_plugin.deepspeed_config\n+ ):\n+ deepspeed_plugin.deepspeed_config.update({\"bf16\": {\"enabled\": True}})\n self.deepspeed_plugin = deepspeed_plugin\n elif int(os.environ.get(\"LOCAL_RANK\", -1)) != -1 and not cpu:\n self.distributed_type = DistributedType.MULTI_GPU\n@@ -189,10 +197,11 @@ def __repr__(self):\n f\"Process index: {self.process_index}\\n\"\n f\"Local process index: {self.local_process_index}\\n\"\n f\"Device: {self.device}\\n\"\n- f\"Mixed precision type: {mixed_precision}\\n\"\n )\n if self.distributed_type == DistributedType.DEEPSPEED:\n 
repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\n\"\n+ else:\n+ f\"Mixed precision type: {mixed_precision}\\n\"\n return repr\n \n # For backward compatibility\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 91dc0f7ad..89d104a49 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -26,7 +26,14 @@\n import torch\n \n from ..state import AcceleratorState\n-from ..utils import gather, is_comet_ml_available, is_tensorboard_available, is_tpu_available, is_wandb_available\n+from ..utils import (\n+ gather,\n+ is_comet_ml_available,\n+ is_deepspeed_available,\n+ is_tensorboard_available,\n+ is_tpu_available,\n+ is_wandb_available,\n+)\n \n \n def parse_flag_from_env(key, default=False):\n@@ -85,6 +92,13 @@ def require_multi_gpu(test_case):\n return unittest.skipUnless(torch.cuda.device_count() > 1, \"test requires multiple GPUs\")(test_case)\n \n \n+def require_deepspeed(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed\n+ \"\"\"\n+ return unittest.skipUnless(is_deepspeed_available(), \"test requires DeepSpeed\")(test_case)\n+\n+\n def require_tensorboard(test_case):\n \"\"\"\n Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex a39368dd3..b66b6fd1d 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -27,6 +27,7 @@\n is_sagemaker_available,\n is_tensorboard_available,\n is_tpu_available,\n+ is_transformers_available,\n is_wandb_available,\n )\n from .modeling import (\n@@ -76,7 +77,13 @@\n \n \n if is_deepspeed_available():\n- from .deepspeed import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper\n+ from .deepspeed import (\n+ DeepSpeedEngineWrapper,\n+ DeepSpeedOptimizerWrapper,\n+ DeepSpeedSchedulerWrapper,\n+ DummyOptim,\n+ DummyScheduler,\n+ )\n \n from .launch import PrepareForLaunch\n from .memory import find_executable_batch_size\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nindex 4a19f8b51..b731ca12a 100644\n--- a/src/accelerate/utils/dataclasses.py\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -19,8 +19,11 @@\n import copy\n import enum\n import functools\n+import io\n+import json\n import os\n import typing\n+import warnings\n from dataclasses import dataclass, field\n from datetime import timedelta\n from typing import Callable, Iterable, Optional\n@@ -208,10 +211,15 @@ class TensorInformation:\n \n @dataclass\n class DeepSpeedPlugin:\n+ \"\"\"\n+ This plugin is used to integrate DeepSpeed.\n+ \"\"\"\n \n+ config_file: str = field(default=None, metadata={\"help\": \"Path to the DeepSpeed config file.\"})\n gradient_accumulation_steps: int = field(\n default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n )\n+ gradient_clipping: float = field(default=None, metadata={\"help\": \"Enable gradient clipping with value\"})\n zero_stage: int = field(\n default=None,\n metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n@@ -220,37 +228,137 @@ class DeepSpeedPlugin:\n default=True,\n metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n )\n-\n- auto_opt_mapping: bool = field(\n- default=True,\n- 
metadata={\"help\": \"whether to map torch.adam to deepspeed optimizer version of adam based on config\"},\n+ offload_optimizer_device: bool = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3.\"},\n+ )\n+ offload_param_device: bool = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3.\"},\n+ )\n+ zero3_init_flag: bool = field(\n+ default=None,\n+ metadata={\n+ \"help\": \"Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models.\"\n+ \"Only applicable with ZeRO Stage-3.\"\n+ },\n+ )\n+ zero3_save_16bit_model: bool = field(\n+ default=None,\n+ metadata={\"help\": \"Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3.\"},\n )\n-\n- offload_optimizer_device: bool = field(default=None, metadata={\"help\": \"Possible options are none|cpu|nvme\"})\n \n def __post_init__(self):\n-\n- if self.gradient_accumulation_steps is None:\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n-\n- if self.zero_stage is None:\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n-\n- if self.offload_optimizer_device is None:\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n-\n- self.deepspeed_config = {\n- \"train_batch_size\": None,\n- \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n- \"zero_optimization\": {\n- \"stage\": self.zero_stage,\n- \"offload_optimizer\": {\n- \"device\": self.offload_optimizer_device,\n+ if self.config_file is None:\n+ self.config_file = os.environ.get(\"DEEPSPEED_CONFIG_FILE\", \"none\")\n+ if self.config_file != \"none\":\n+ with io.open(self.config_file, \"r\", encoding=\"utf-8\") as f:\n+ self.deepspeed_config = json.load(f)\n+ if \"gradient_accumulation_steps\" not in self.deepspeed_config:\n+ self.deepspeed_config[\"gradient_accumulation_steps\"] = 1\n+ elif self.deepspeed_config[\"gradient_accumulation_steps\"] == \"auto\":\n+ raise ValueError(\"gradient_accumulation_steps cannot be set to 'auto' in the DeepSpeed config file.\")\n+ if \"zero_optimization\" not in self.deepspeed_config:\n+ raise ValueError(\"Please specify the ZeRO optimization config in the DeepSpeed config file.\")\n+ else:\n+ if self.gradient_accumulation_steps is None:\n+ self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n+\n+ if self.gradient_clipping is None:\n+ gradient_clipping = os.environ.get(\"GRADIENT_CLIPPING\", \"none\")\n+ if gradient_clipping != \"none\":\n+ self.gradient_clipping = float(gradient_clipping)\n+\n+ if self.zero_stage is None:\n+ self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+\n+ if self.offload_optimizer_device is None:\n+ self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+\n+ if self.offload_param_device is None:\n+ self.offload_param_device = os.environ.get(\"DEEPSPEED_OFFLOAD_PARAM_DEVICE\", \"none\")\n+\n+ if self.zero3_save_16bit_model is None:\n+ self.zero3_save_16bit_model = os.environ.get(\"DEEPSPEED_ZERO3_SAVE_16BIT_MODEL\", \"false\") == \"true\"\n+\n+ self.deepspeed_config = {\n+ \"train_batch_size\": \"auto\",\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"zero_optimization\": {\n+ \"stage\": self.zero_stage,\n+ \"offload_optimizer\": {\n+ \"device\": self.offload_optimizer_device,\n+ },\n+ 
\"offload_param\": {\n+ \"device\": self.offload_param_device,\n+ },\n+ \"stage3_gather_16bit_weights_on_model_save\": self.zero3_save_16bit_model,\n },\n- },\n- \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n- \"zero_allow_untested_optimizer\": True,\n- }\n+ }\n+ if self.gradient_clipping:\n+ self.deepspeed_config[\"gradient_clipping\"] = self.gradient_clipping\n+ self.deepspeed_config[\"steps_per_print\"] = float(\"inf\") # this will stop deepspeed from logging @ stdout\n+ if self.zero3_init_flag is None:\n+ self.zero3_init_flag = os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\"\n+ if self.zero3_init_flag and self.deepspeed_config[\"zero_optimization\"][\"stage\"] != 3:\n+ warnings.warn(\"DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.\")\n+ self.zero3_init_flag = False\n+\n+ def find_config_node(self, ds_key_long):\n+ config = self.deepspeed_config\n+\n+ # find the config node of interest if it exists\n+ nodes = ds_key_long.split(\".\")\n+ ds_key = nodes.pop()\n+ for node in nodes:\n+ config = config.get(node)\n+ if config is None:\n+ return None, ds_key\n+\n+ return config, ds_key\n+\n+ def fill_match(self, ds_key_long, mismatches, must_match=True, **kwargs):\n+ config, ds_key = self.find_config_node(ds_key_long)\n+ if config is None:\n+ return\n+\n+ if config.get(ds_key) == \"auto\":\n+ if ds_key_long in kwargs:\n+ config[ds_key] = kwargs[ds_key_long]\n+ return\n+ else:\n+ raise ValueError(\n+ f\"`{ds_key_long}` not found in kwargs. \"\n+ f\"Please specify `{ds_key_long}` without `auto`(set to correct value) in the DeepSpeed config file or \"\n+ \"pass it in kwargs.\"\n+ )\n+\n+ if not must_match:\n+ return\n+\n+ ds_val = config.get(ds_key)\n+ if ds_val is not None and ds_key_long in kwargs:\n+ if ds_val != kwargs[ds_key_long]:\n+ mismatches.append(f\"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}\")\n+\n+ def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must_match=True, **kwargs):\n+ \"\"\"Process the DeepSpeed config with the values from the kwargs.\"\"\"\n+ mismatches = [] if mismatches is None else mismatches\n+ if config is None:\n+ config = self.deepspeed_config\n+ for key, value in config.items():\n+ if isinstance(value, dict):\n+ self.deepspeed_config_process(\n+ prefix=prefix + key + \".\", mismatches=mismatches, config=value, must_match=must_match, **kwargs\n+ )\n+ else:\n+ self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs)\n+ if len(mismatches) > 0 and prefix == \"\":\n+ mismatches_msg = \"\\n\".join(mismatches)\n+ raise ValueError(\n+ \"Please correct the following DeepSpeed config values that mismatch kwargs \"\n+ f\" values:\\n{mismatches_msg}\\nThe easiest method is to set these DeepSpeed config values to 'auto'.\"\n+ )\n \n \n @dataclass\ndiff --git a/src/accelerate/utils/deepspeed.py b/src/accelerate/utils/deepspeed.py\nindex 891f6fdb8..dde1b9c63 100644\n--- a/src/accelerate/utils/deepspeed.py\n+++ b/src/accelerate/utils/deepspeed.py\n@@ -12,58 +12,34 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from ..optimizer import AcceleratedOptimizer\n-from .imports import is_apex_available, is_deepspeed_available\n-\n-\n-if is_deepspeed_available():\n- from deepspeed import DeepSpeedEngine\n+from accelerate.scheduler import AcceleratedScheduler\n \n-if is_apex_available():\n- from apex import amp\n+from ..optimizer import AcceleratedOptimizer\n 
\n \n-class DeepSpeedEngineWrapper(DeepSpeedEngine):\n- \"\"\"\n- Wrapper over deepspeed.DeepSpeedEngine object\n+class DeepSpeedEngineWrapper:\n \"\"\"\n+ Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop.\n \n- def __init__(self, *args, **kwargs):\n- super().__init__(*args, **kwargs)\n-\n- # overwriting micro_steps for user's gradient_accumulation\n- self.micro_steps = -1\n-\n- def step(self, lr_kwargs=None):\n- \"\"\"DeepSpeedEngine.step() without `micro_steps` update & no profiling\"\"\"\n- if self.is_gradient_accumulation_boundary(): # it shouldn't matter whether we keep this line or not\n- if self.progressive_layer_drop:\n- self.progressive_layer_drop.update_state(self.global_steps)\n+ Args:\n+ engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap\n+ \"\"\"\n \n- self._take_model_step(lr_kwargs)\n+ def __init__(self, engine):\n+ self.engine = engine\n \n def backward(self, loss):\n- \"\"\"DeepSpeedEngine.backward() with with no loss scaling; no profiling but with `micro_steps` update\"\"\"\n-\n- if self.zero_optimization():\n- self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()\n- self.optimizer.backward(loss)\n- elif self.amp_enabled():\n- # AMP requires delaying unscale when inside gradient accumulation boundaries\n- # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n- delay_unscale = not self.is_gradient_accumulation_boundary()\n- with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss:\n- scaled_loss.backward()\n- elif self.fp16_enabled():\n- self.optimizer.backward(loss)\n- else:\n- loss.backward()\n+ # runs backpropagation and handles mixed precision\n+ self.engine.backward(loss)\n \n- if self.enable_backward_allreduce:\n- self.allreduce_gradients()\n-\n- # this will ensure deepspeed gradient_accumulation matches user's accumulation\n- self.micro_steps += 1\n+ # deepspeed `engine.step` performs following operations:\n+ # gradient accumulation check\n+ # gradient clipping\n+ # optimizer step\n+ # zero grad\n+ # checking overflow\n+ # lr_scheduler step\n+ self.engine.step()\n \n \n class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):\n@@ -75,22 +51,79 @@ class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):\n The optimizer to wrap.\n \"\"\"\n \n- def __init__(self, optimizer, model: DeepSpeedEngineWrapper):\n+ def __init__(self, optimizer):\n super().__init__(optimizer, device_placement=False, scaler=None)\n \n- self.model = model\n-\n def zero_grad(self, set_to_none=None):\n- pass # `model.step()` is doing that automatically. Therefore, it's implementation is not needed\n+ pass # `accelerator.backward(loss)` is doing that automatically. Therefore, it's implementation is not needed\n \n def step(self):\n- \"\"\"This will handle optimizer.step() & optimizer.zero_grad() with gradient_accumulation\"\"\"\n- self.model.step()\n+ pass # `accelerator.backward(loss)` is doing that automatically. 
Therefore, it's implementation is not needed\n \n @property\n- def is_overflow(self):\n+ def step_was_skipped(self):\n \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n- overflow = False\n- if hasattr(self.optimizer, \"overflow\"):\n- overflow = self.optimizer.overflow\n- return overflow\n+ return self.optimizer.overflow\n+\n+\n+class DeepSpeedSchedulerWrapper(AcceleratedScheduler):\n+ \"\"\"\n+ Internal wrapper around a deepspeed scheduler.\n+\n+ Args:\n+ scheduler (`torch.optim.lr_scheduler.LambdaLR`):\n+ The scheduler to wrap.\n+ optimizers (one or a list of `torch.optim.Optimizer`):\n+ \"\"\"\n+\n+ def __init__(self, scheduler, optimizers):\n+ super().__init__(scheduler, optimizers)\n+\n+ def step(self):\n+ pass # `accelerator.backward(loss)` is doing that automatically. Therefore, it's implementation is not needed\n+\n+\n+class DummyOptim:\n+ \"\"\"\n+ Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training\n+ loop when optimizer config is specified in the deepspeed config file.\n+\n+ Args:\n+ lr (float):\n+ Learning rate.\n+ params (iterable): iterable of parameters to optimize or dicts defining\n+ parameter groups\n+ weight_decay (float):\n+ Weight decay.\n+ **kwargs:\n+ Other arguments.\n+ \"\"\"\n+\n+ def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):\n+ self.params = params\n+ self.lr = lr\n+ self.weight_decay = weight_decay\n+ self.kwargs = kwargs\n+\n+\n+class DummyScheduler:\n+ \"\"\"\n+ Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training\n+ loop when scheduler config is specified in the deepspeed config file.\n+\n+ Args:\n+ optimizer (`torch.optim.optimizer.Optimizer`):\n+ The optimizer to wrap.\n+ total_num_steps (int):\n+ Total number of steps.\n+ warmup_num_steps (int):\n+ Number of steps for warmup.\n+ **kwargs:\n+ Other arguments.\n+ \"\"\"\n+\n+ def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):\n+ self.optimizer = optimizer\n+ self.total_num_steps = total_num_steps\n+ self.warmup_num_steps = warmup_num_steps\n+ self.kwargs = kwargs\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex 947508726..c8cce9587 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -63,6 +63,10 @@ def is_deepspeed_available():\n return False\n \n \n+def is_transformers_available():\n+ return importlib.util.find_spec(\"transformers\") is not None\n+\n+\n def is_tensorboard_available():\n return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\n \ndiff --git a/tests/deepspeed/ds_config_zero2.json b/tests/deepspeed/ds_config_zero2.json\nnew file mode 100644\nindex 000000000..f031969ee\n--- /dev/null\n+++ b/tests/deepspeed/ds_config_zero2.json\n@@ -0,0 +1,49 @@\n+{\n+ \"fp16\": {\n+ \"enabled\": \"auto\",\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"bf16\": {\n+ \"enabled\": \"auto\"\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\",\n+ \"torch_adam\": true,\n+ \"adam_w_mode\": true\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ 
\"stage\": 2,\n+ \"offload_optimizer\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"allgather_partitions\": true,\n+ \"allgather_bucket_size\": 2e8,\n+ \"overlap_comm\": true,\n+ \"reduce_scatter\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"contiguous_gradients\": true\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n\\ No newline at end of file\ndiff --git a/tests/deepspeed/ds_config_zero3.json b/tests/deepspeed/ds_config_zero3.json\nnew file mode 100644\nindex 000000000..846cd7361\n--- /dev/null\n+++ b/tests/deepspeed/ds_config_zero3.json\n@@ -0,0 +1,56 @@\n+{\n+ \"fp16\": {\n+ \"enabled\": \"auto\",\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"bf16\": {\n+ \"enabled\": \"auto\"\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\",\n+ \"torch_adam\": true,\n+ \"adam_w_mode\": true\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"offload_optimizer\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"offload_param\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"overlap_comm\": true,\n+ \"contiguous_gradients\": true,\n+ \"sub_group_size\": 1e9,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"stage3_prefetch_bucket_size\": \"auto\",\n+ \"stage3_param_persistence_threshold\": \"auto\",\n+ \"stage3_max_live_parameters\": 1e9,\n+ \"stage3_max_reuse_distance\": 1e9,\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\"\n+ },\n+ \"gradient_accumulation_steps\": 1,\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n\\ No newline at end of file\ndiff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py\nnew file mode 100644\nindex 000000000..dbe64b5c7\n--- /dev/null\n+++ b/tests/deepspeed/test_deepspeed.py\n@@ -0,0 +1,581 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import inspect\n+import io\n+import itertools\n+import json\n+import os\n+import tempfile\n+import unittest\n+from copy import deepcopy\n+from pathlib import Path\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate.accelerator import Accelerator\n+from accelerate.scheduler import AcceleratedScheduler\n+from accelerate.state import AcceleratorState\n+from accelerate.test_utils.testing import require_cuda, require_deepspeed\n+from accelerate.test_utils.training import RegressionDataset\n+from accelerate.utils.dataclasses import DeepSpeedPlugin\n+from accelerate.utils.deepspeed import (\n+ DeepSpeedEngineWrapper,\n+ DeepSpeedOptimizerWrapper,\n+ DeepSpeedSchedulerWrapper,\n+ DummyOptim,\n+ DummyScheduler,\n+)\n+from parameterized import parameterized\n+from transformers import AutoModel, AutoModelForCausalLM, get_scheduler\n+from transformers.deepspeed import HfDeepSpeedConfig\n+from transformers.testing_utils import mockenv_context\n+from transformers.trainer_utils import set_seed\n+from transformers.utils import is_torch_bf16_available\n+\n+\n+set_seed(42)\n+\n+T5_SMALL = \"t5-small\"\n+T5_TINY = \"patrickvonplaten/t5-tiny-random\"\n+GPT2_TINY = \"sshleifer/tiny-gpt2\"\n+\n+ZERO2 = \"zero2\"\n+ZERO3 = \"zero3\"\n+\n+FP16 = \"fp16\"\n+BF16 = \"bf16\"\n+\n+CUSTOM_OPTIMIZER = \"custom_optimizer\"\n+CUSTOM_SCHEDULER = \"custom_scheduler\"\n+DS_OPTIMIZER = \"deepspeed_optimizer\"\n+DS_SCHEDULER = \"deepspeed_scheduler\"\n+\n+stages = [ZERO2, ZERO3]\n+optims = [CUSTOM_OPTIMIZER, DS_OPTIMIZER]\n+schedulers = [CUSTOM_SCHEDULER, DS_SCHEDULER]\n+if is_torch_bf16_available():\n+ dtypes = [FP16, BF16]\n+else:\n+ dtypes = [FP16]\n+\n+\n+def parameterized_custom_name_func(func, param_num, param):\n+ # customize the test name generator function as we want both params to appear in the sub-test\n+ # name, as by default it shows only the first param\n+ param_based_name = parameterized.to_safe_name(\"_\".join(str(x) for x in param.args))\n+ return f\"{func.__name__}_{param_based_name}\"\n+\n+\n+# Cartesian-product of zero stages with models to test\n+params = list(itertools.product(stages, dtypes))\n+optim_scheduler_params = list(itertools.product(optims, schedulers))\n+\n+\n+@require_deepspeed\n+@require_cuda\n+class DeepSpeedConfigIntegration(unittest.TestCase):\n+ def setUp(self):\n+ super().setUp()\n+\n+ self._test_file_path = inspect.getfile(self.__class__)\n+ path = Path(self._test_file_path).resolve()\n+ self.test_file_dir_str = str(path.parents[0])\n+\n+ self.ds_config_file = dict(\n+ zero2=f\"{self.test_file_dir_str}/ds_config_zero2.json\",\n+ zero3=f\"{self.test_file_dir_str}/ds_config_zero3.json\",\n+ )\n+\n+ # use self.get_config_dict(stage) to use these to ensure the original is not modified\n+ with io.open(self.ds_config_file[ZERO2], \"r\", encoding=\"utf-8\") as f:\n+ config_zero2 = json.load(f)\n+ with io.open(self.ds_config_file[ZERO3], \"r\", encoding=\"utf-8\") as f:\n+ 
config_zero3 = json.load(f)\n+ # The following setting slows things down, so don't enable it by default unless needed by a test.\n+ # It's in the file as a demo for users since we want everything to work out of the box even if slower.\n+ config_zero3[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"] = False\n+\n+ self.ds_config_dict = dict(zero2=config_zero2, zero3=config_zero3)\n+\n+ self.dist_env = dict(\n+ USE_DEEPSPEED=\"true\",\n+ MASTER_ADDR=\"localhost\",\n+ MASTER_PORT=\"10999\",\n+ RANK=\"0\",\n+ LOCAL_RANK=\"0\",\n+ WORLD_SIZE=\"1\",\n+ )\n+\n+ def get_config_dict(self, stage):\n+ # As some tests modify the dict, always make a copy\n+ return deepcopy(self.ds_config_dict[stage])\n+\n+ @parameterized.expand(stages, name_func=parameterized_custom_name_func)\n+ def test_deepspeed_plugin(self, stage):\n+\n+ # Test zero3_init_flag will be set to False when ZeRO stage != 3\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=2,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=True,\n+ zero3_init_flag=True,\n+ )\n+ self.assertFalse(deepspeed_plugin.zero3_init_flag)\n+ deepspeed_plugin.deepspeed_config = None\n+\n+ # Test zero3_init_flag will be set to True only when ZeRO stage == 3\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=3,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=True,\n+ zero3_init_flag=True,\n+ )\n+ self.assertTrue(deepspeed_plugin.zero3_init_flag)\n+ deepspeed_plugin.deepspeed_config = None\n+\n+ # Test config files are loaded correctly\n+ deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[stage], zero3_init_flag=True)\n+ if stage == ZERO2:\n+ self.assertFalse(deepspeed_plugin.zero3_init_flag)\n+ elif stage == ZERO3:\n+ self.assertTrue(deepspeed_plugin.zero3_init_flag)\n+ deepspeed_plugin.deepspeed_config = None\n+\n+ # Test `gradient_accumulation_steps` is set to 1 if unavailable in config file\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ ds_config = self.get_config_dict(stage)\n+ del ds_config[\"gradient_accumulation_steps\"]\n+ with open(os.path.join(dirpath, \"ds_config.json\"), \"w\") as out_file:\n+ json.dump(ds_config, out_file)\n+ deepspeed_plugin = DeepSpeedPlugin(config_file=os.path.join(dirpath, \"ds_config.json\"))\n+ self.assertEqual(deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"], 1)\n+ deepspeed_plugin.deepspeed_config = None\n+\n+ # Test `ValueError` is raised if `zero_optimization` is unavailable in config file\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ ds_config = self.get_config_dict(stage)\n+ del ds_config[\"zero_optimization\"]\n+ with open(os.path.join(dirpath, \"ds_config.json\"), \"w\") as out_file:\n+ json.dump(ds_config, out_file)\n+ with self.assertRaises(ValueError) as cm:\n+ deepspeed_plugin = DeepSpeedPlugin(config_file=os.path.join(dirpath, \"ds_config.json\"))\n+ self.assertTrue(\n+ \"Please specify the ZeRO optimization config in the DeepSpeed config file.\" in str(cm.exception)\n+ )\n+ deepspeed_plugin.deepspeed_config = None\n+\n+ # Test `deepspeed_config_process`\n+ deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[stage])\n+ kwargs = {\n+ \"fp16.enabled\": True,\n+ \"bf16.enabled\": False,\n+ \"optimizer.params.lr\": 5e-5,\n+ \"optimizer.params.weight_decay\": 0.0,\n+ \"scheduler.params.warmup_min_lr\": 0.0,\n+ 
\"scheduler.params.warmup_max_lr\": 5e-5,\n+ \"scheduler.params.warmup_num_steps\": 0,\n+ \"train_micro_batch_size_per_gpu\": 16,\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": 16,\n+ \"zero_optimization.reduce_bucket_size\": 5e5,\n+ \"zero_optimization.stage3_prefetch_bucket_size\": 5e5,\n+ \"zero_optimization.stage3_param_persistence_threshold\": 5e5,\n+ \"zero_optimization.stage3_gather_16bit_weights_on_model_save\": False,\n+ }\n+ deepspeed_plugin.deepspeed_config_process(**kwargs)\n+ for ds_key_long, value in kwargs.items():\n+ config, ds_key = deepspeed_plugin.find_config_node(ds_key_long)\n+ if config.get(ds_key) is not None:\n+ self.assertEqual(config.get(ds_key), value)\n+\n+ # Test mismatches\n+ mismatches = {\n+ \"optimizer.params.lr\": 1e-5,\n+ \"optimizer.params.weight_decay\": 1e-5,\n+ \"gradient_accumulation_steps\": 2,\n+ }\n+ with self.assertRaises(ValueError) as cm:\n+ new_kwargs = deepcopy(kwargs)\n+ new_kwargs.update(mismatches)\n+ deepspeed_plugin.deepspeed_config_process(**new_kwargs)\n+ for key in mismatches.keys():\n+ self.assertTrue(\n+ key in str(cm.exception),\n+ f\"{key} is not in the exception message:\\n{cm.exception}\",\n+ )\n+\n+ # Test `ValueError` is raised if some config file fields with `auto` value is missing in `kwargs`\n+ deepspeed_plugin.deepspeed_config[\"optimizer\"][\"params\"][\"lr\"] = \"auto\"\n+ with self.assertRaises(ValueError) as cm:\n+ del kwargs[\"optimizer.params.lr\"]\n+ deepspeed_plugin.deepspeed_config_process(**kwargs)\n+ self.assertTrue(\"`optimizer.params.lr` not found in kwargs.\" in str(cm.exception))\n+\n+ @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)\n+ def test_accelerate_state_deepspeed(self, dtype):\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=ZERO2,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=True,\n+ zero3_init_flag=True,\n+ )\n+ with mockenv_context(**self.dist_env):\n+ state = AcceleratorState(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin, _from_accelerator=True)\n+ self.assertTrue(state.deepspeed_plugin.deepspeed_config[dtype][\"enabled\"])\n+ state.initialized = False\n+\n+ def test_init_zero3(self):\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=3,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=True,\n+ zero3_init_flag=True,\n+ )\n+\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ self.assertTrue(\"dschf\" in accelerator.__dict__)\n+ self.assertTrue(type(accelerator.dschf) == HfDeepSpeedConfig)\n+\n+ @parameterized.expand(optim_scheduler_params, name_func=parameterized_custom_name_func)\n+ def test_prepare_deepspeed(self, optim_type, scheduler_type):\n+ # 1. 
Testing with one of the ZeRO Stages is enough to test the `_prepare_deepspeed` function.\n+ # Here we test using ZeRO Stage 2 with FP16 enabled.\n+ from deepspeed.runtime.engine import DeepSpeedEngine\n+\n+ kwargs = {\n+ \"fp16.enabled\": True,\n+ \"bf16.enabled\": False,\n+ \"optimizer.params.lr\": 5e-5,\n+ \"optimizer.params.weight_decay\": 0.0,\n+ \"scheduler.params.warmup_min_lr\": 0.0,\n+ \"scheduler.params.warmup_max_lr\": 5e-5,\n+ \"scheduler.params.warmup_num_steps\": 0,\n+ \"train_micro_batch_size_per_gpu\": 16,\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": 16,\n+ \"zero_optimization.reduce_bucket_size\": 5e5,\n+ \"zero_optimization.stage3_prefetch_bucket_size\": 5e5,\n+ \"zero_optimization.stage3_param_persistence_threshold\": 5e5,\n+ \"zero_optimization.stage3_gather_16bit_weights_on_model_save\": False,\n+ }\n+\n+ if optim_type == CUSTOM_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER:\n+ # Test custom optimizer + custom scheduler\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ gradient_accumulation_steps=1,\n+ gradient_clipping=1.0,\n+ zero_stage=2,\n+ offload_optimizer_device=\"cpu\",\n+ offload_param_device=\"cpu\",\n+ zero3_save_16bit_model=False,\n+ zero3_init_flag=False,\n+ )\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(mixed_precision=\"fp16\", deepspeed_plugin=deepspeed_plugin)\n+ self.assertEqual(accelerator.state.deepspeed_plugin.config_file, \"none\")\n+\n+ train_set = RegressionDataset(length=80)\n+ eval_set = RegressionDataset(length=20)\n+ train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)\n+ eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)\n+ model = AutoModel.from_pretrained(GPT2_TINY)\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)\n+ lr_scheduler = get_scheduler(\n+ name=\"linear\",\n+ optimizer=optimizer,\n+ num_warmup_steps=0,\n+ num_training_steps=1000,\n+ )\n+ dummy_optimizer = DummyOptim(params=model.parameters())\n+ dummy_lr_scheduler = DummyScheduler(dummy_optimizer)\n+\n+ with self.assertRaises(ValueError) as cm:\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+ self.assertTrue(\n+ \"You cannot create a `DummyOptim` without specifying an optimizer in the config file.\"\n+ in str(cm.exception)\n+ )\n+ with self.assertRaises(ValueError) as cm:\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler\n+ )\n+ self.assertTrue(\n+ \"You cannot create a `DummyScheduler` without specifying a scheduler in the config file.\"\n+ in str(cm.exception)\n+ )\n+\n+ with self.assertRaises(ValueError) as cm:\n+ model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)\n+ self.assertTrue(\n+ \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ in str(cm.exception)\n+ )\n+\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+ self.assertTrue(accelerator.deepspeed_config[\"zero_allow_untested_optimizer\"])\n+ self.assertTrue(accelerator.deepspeed_config[\"train_batch_size\"], 16)\n+ self.assertEqual(type(model), DeepSpeedEngine)\n+ self.assertEqual(type(optimizer), DeepSpeedOptimizerWrapper)\n+ self.assertEqual(type(lr_scheduler), AcceleratedScheduler)\n+ 
self.assertEqual(type(accelerator.deepspeed_engine_wrapped), DeepSpeedEngineWrapper)\n+\n+ elif optim_type == DS_OPTIMIZER and scheduler_type == DS_SCHEDULER:\n+ # Test DeepSpeed optimizer + DeepSpeed scheduler\n+ deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[ZERO2])\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ train_set = RegressionDataset(length=80)\n+ eval_set = RegressionDataset(length=20)\n+ train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n+ eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)\n+ model = AutoModel.from_pretrained(GPT2_TINY)\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)\n+ lr_scheduler = get_scheduler(\n+ name=\"linear\",\n+ optimizer=optimizer,\n+ num_warmup_steps=0,\n+ num_training_steps=1000,\n+ )\n+ dummy_optimizer = DummyOptim(params=model.parameters())\n+ dummy_lr_scheduler = DummyScheduler(dummy_optimizer)\n+ kwargs[\"train_batch_size\"] = (\n+ kwargs[\"train_micro_batch_size_per_gpu\"]\n+ * deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]\n+ * accelerator.num_processes\n+ )\n+ accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)\n+ with self.assertRaises(ValueError) as cm:\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler\n+ )\n+ self.assertTrue(\n+ \"You cannot specify an optimizer in the config file and in the code at the same time\"\n+ in str(cm.exception)\n+ )\n+\n+ with self.assertRaises(ValueError) as cm:\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+ self.assertTrue(\n+ \"You cannot specify a scheduler in the config file and in the code at the same time\"\n+ in str(cm.exception)\n+ )\n+\n+ with self.assertRaises(ValueError) as cm:\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+ self.assertTrue(\n+ \"You cannot specify a scheduler in the config file and in the code at the same time\"\n+ in str(cm.exception)\n+ )\n+\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler\n+ )\n+ self.assertTrue(type(model) == DeepSpeedEngine)\n+ self.assertTrue(type(optimizer) == DeepSpeedOptimizerWrapper)\n+ self.assertTrue(type(lr_scheduler) == DeepSpeedSchedulerWrapper)\n+ self.assertTrue(type(accelerator.deepspeed_engine_wrapped) == DeepSpeedEngineWrapper)\n+\n+ elif optim_type == CUSTOM_OPTIMIZER and scheduler_type == DS_SCHEDULER:\n+ # Test custom optimizer + DeepSpeed scheduler\n+ deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[ZERO2])\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ train_set = RegressionDataset(length=80)\n+ eval_set = RegressionDataset(length=20)\n+ train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n+ eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)\n+ model = AutoModel.from_pretrained(GPT2_TINY)\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)\n+ lr_scheduler = get_scheduler(\n+ name=\"linear\",\n+ optimizer=optimizer,\n+ num_warmup_steps=0,\n+ 
num_training_steps=1000,\n+ )\n+ dummy_optimizer = DummyOptim(params=model.parameters())\n+ dummy_lr_scheduler = DummyScheduler(dummy_optimizer)\n+ kwargs[\"train_batch_size\"] = (\n+ kwargs[\"train_micro_batch_size_per_gpu\"]\n+ * deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]\n+ * accelerator.num_processes\n+ )\n+ accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)\n+ del accelerator.state.deepspeed_plugin.deepspeed_config[\"optimizer\"]\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler\n+ )\n+ self.assertTrue(type(model) == DeepSpeedEngine)\n+ self.assertTrue(type(optimizer) == DeepSpeedOptimizerWrapper)\n+ self.assertTrue(type(lr_scheduler) == DeepSpeedSchedulerWrapper)\n+ self.assertTrue(type(accelerator.deepspeed_engine_wrapped) == DeepSpeedEngineWrapper)\n+ elif optim_type == DS_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER:\n+ # Test deepspeed optimizer + custom scheduler\n+ deepspeed_plugin = DeepSpeedPlugin(config_file=self.ds_config_file[ZERO2])\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ train_set = RegressionDataset(length=80)\n+ eval_set = RegressionDataset(length=20)\n+ train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)\n+ eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)\n+ model = AutoModel.from_pretrained(GPT2_TINY)\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)\n+ lr_scheduler = get_scheduler(\n+ name=\"linear\",\n+ optimizer=optimizer,\n+ num_warmup_steps=0,\n+ num_training_steps=1000,\n+ )\n+ dummy_optimizer = DummyOptim(params=model.parameters())\n+ dummy_lr_scheduler = DummyScheduler(dummy_optimizer)\n+ kwargs[\"train_batch_size\"] = (\n+ kwargs[\"train_micro_batch_size_per_gpu\"]\n+ * deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]\n+ * accelerator.num_processes\n+ )\n+ accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)\n+ del accelerator.state.deepspeed_plugin.deepspeed_config[\"scheduler\"]\n+ with self.assertRaises(ValueError) as cm:\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+ self.assertTrue(\n+ \"You can only specify `accelerate.utils.DummyScheduler` in the code when using `accelerate.utils.DummyOptim`.\"\n+ in str(cm.exception)\n+ )\n+ accelerator.state.initialized = False\n+\n+ def test_save_checkpoints(self):\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ config_file=self.ds_config_file[ZERO3],\n+ zero3_init_flag=True,\n+ )\n+ del deepspeed_plugin.deepspeed_config[\"bf16\"]\n+ kwargs = {\n+ \"fp16.enabled\": True,\n+ \"bf16.enabled\": False,\n+ \"optimizer.params.lr\": 5e-5,\n+ \"optimizer.params.weight_decay\": 0.0,\n+ \"scheduler.params.warmup_min_lr\": 0.0,\n+ \"scheduler.params.warmup_max_lr\": 5e-5,\n+ \"scheduler.params.warmup_num_steps\": 0,\n+ \"train_micro_batch_size_per_gpu\": 16,\n+ \"gradient_clipping\": 1.0,\n+ \"train_batch_size\": 16,\n+ \"zero_optimization.reduce_bucket_size\": 5e5,\n+ \"zero_optimization.stage3_prefetch_bucket_size\": 5e5,\n+ \"zero_optimization.stage3_param_persistence_threshold\": 5e5,\n+ \"zero_optimization.stage3_gather_16bit_weights_on_model_save\": False,\n+ }\n+\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ 
kwargs[\"train_batch_size\"] = (\n+ kwargs[\"train_micro_batch_size_per_gpu\"]\n+ * deepspeed_plugin.deepspeed_config[\"gradient_accumulation_steps\"]\n+ * accelerator.num_processes\n+ )\n+ accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)\n+\n+ train_set = RegressionDataset(length=80)\n+ eval_set = RegressionDataset(length=20)\n+ train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)\n+ eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)\n+ model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ dummy_optimizer = DummyOptim(params=model.parameters())\n+ dummy_lr_scheduler = DummyScheduler(dummy_optimizer)\n+\n+ model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(\n+ model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler\n+ )\n+ with self.assertRaises(ValueError) as cm:\n+ accelerator.get_state_dict(model)\n+ msg = (\n+ \"Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. \"\n+ \"To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or \"\n+ \"set `zero3_save_16bit_model` to True when using `accelerate config`. \"\n+ \"To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights.\"\n+ )\n+ self.assertTrue(msg in str(cm.exception))\n+ accelerator.state.initialized = False\n+\n+ def test_autofill_dsconfig(self):\n+ deepspeed_plugin = DeepSpeedPlugin(\n+ config_file=self.ds_config_file[ZERO3],\n+ zero3_init_flag=True,\n+ )\n+ del deepspeed_plugin.deepspeed_config[\"bf16\"]\n+ del deepspeed_plugin.deepspeed_config[\"fp16\"]\n+\n+ with mockenv_context(**self.dist_env):\n+ accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)\n+ train_set = RegressionDataset(length=80)\n+ eval_set = RegressionDataset(length=20)\n+ train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)\n+ eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)\n+ model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ dummy_optimizer = DummyOptim(params=model.parameters(), lr=5e-5, weight_decay=1e-4)\n+ dummy_lr_scheduler = DummyScheduler(dummy_optimizer, warmup_num_steps=10, total_num_steps=1000)\n+ hidden_size = model.config.hidden_size\n+ model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(\n+ model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler\n+ )\n+ self.assertEqual(accelerator.deepspeed_config[\"train_micro_batch_size_per_gpu\"], 16)\n+ self.assertEqual(accelerator.deepspeed_config[\"train_batch_size\"], 16)\n+\n+ self.assertEqual(accelerator.deepspeed_config[\"optimizer\"][\"params\"][\"lr\"], 5e-5)\n+ self.assertEqual(accelerator.deepspeed_config[\"optimizer\"][\"params\"][\"weight_decay\"], 1e-4)\n+\n+ self.assertEqual(accelerator.deepspeed_config[\"scheduler\"][\"params\"][\"warmup_min_lr\"], 0.0)\n+ self.assertEqual(accelerator.deepspeed_config[\"scheduler\"][\"params\"][\"warmup_max_lr\"], 5e-5)\n+ self.assertEqual(accelerator.deepspeed_config[\"scheduler\"][\"params\"][\"warmup_num_steps\"], 10)\n+\n+ self.assertEqual(accelerator.deepspeed_config[\"gradient_clipping\"], 1.0)\n+ self.assertEqual(\n+ accelerator.deepspeed_config[\"zero_optimization\"][\"reduce_bucket_size\"], hidden_size * hidden_size\n+ )\n+ self.assertEqual(\n+ accelerator.deepspeed_config[\"zero_optimization\"][\"stage3_prefetch_bucket_size\"],\n+ 0.9 * hidden_size * hidden_size,\n+ )\n+ 
self.assertEqual(\n+ accelerator.deepspeed_config[\"zero_optimization\"][\"stage3_param_persistence_threshold\"],\n+ 10 * hidden_size,\n+ )\n+ self.assertFalse(\n+ accelerator.deepspeed_config[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"]\n+ )\n+ accelerator.state.initialized = False\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 8f277b932..6ad4a3ca8 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -32,7 +32,13 @@\n # Should mock `{script_name}.get_dataloaders` via:\n # @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n \n-EXCLUDE_EXAMPLES = [\"cross_validation.py\", \"multi_process_metrics.py\", \"memory.py\", \"fsdp_with_peak_mem_tracking.py\"]\n+EXCLUDE_EXAMPLES = [\n+ \"cross_validation.py\",\n+ \"multi_process_metrics.py\",\n+ \"memory.py\",\n+ \"fsdp_with_peak_mem_tracking.py\",\n+ \"deepspeed_with_config_support.py\",\n+]\n \n \n class ExampleDifferenceTests(unittest.TestCase):\n", "code_comments": [ { "body": "```suggestion\r\n \"DeepSpeed is not installed => run `pip install deepspeed` or build it from source.\"\r\n```\r\nWe usually just say pip in those error messages.", "diff_hunk": "@@ -163,6 +172,25 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n+ if deepspeed_plugin:\n+ if not is_deepspeed_available():\n+ raise ImportError(\n+ \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\"", "from_author": false }, { "body": "```suggestion\r\n \"Please run `pip install transformers`.\"\r\n```", "diff_hunk": "@@ -163,6 +172,25 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n+ if deepspeed_plugin:\n+ if not is_deepspeed_available():\n+ raise ImportError(\n+ \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\"\n+ )\n+ if compare_versions(\"deepspeed\", \"<\", \"0.6.4\"):\n+ raise ImportError(\"DeepSpeed version must be >= 0.6.4. Please update DeepSpeed.\")\n+ if os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\" or deepspeed_plugin.zero3_init_flag:\n+ if not is_transformers_available():\n+ raise Exception(\n+ \"When `zero3_init_flag` is set, it requires Transformers to be installed. \"\n+ \"Please run `pip3 install transformers`.\"", "from_author": false }, { "body": "Ultimately, we will want this object to live in Accelerate, not in Transformers. I don't know when is the best time to move it, just putting it as a general comment :-)", "diff_hunk": "@@ -163,6 +172,25 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n+ if deepspeed_plugin:\n+ if not is_deepspeed_available():\n+ raise ImportError(\n+ \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\"\n+ )\n+ if compare_versions(\"deepspeed\", \"<\", \"0.6.4\"):\n+ raise ImportError(\"DeepSpeed version must be >= 0.6.4. Please update DeepSpeed.\")\n+ if os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\" or deepspeed_plugin.zero3_init_flag:\n+ if not is_transformers_available():\n+ raise Exception(\n+ \"When `zero3_init_flag` is set, it requires Transformers to be installed. 
\"\n+ \"Please run `pip3 install transformers`.\"\n+ )\n+ from transformers.deepspeed import HfDeepSpeedConfig", "from_author": false }, { "body": "Put the comment above so everything fits on one line.", "diff_hunk": "@@ -499,81 +527,116 @@ def _prepare_deepspeed(self, *args):\n deepspeed_plugin = self.state.deepspeed_plugin\n self.deepspeed_config = deepspeed_plugin.deepspeed_config\n \n- batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n- if len(batch_sizes) == 0:\n- raise ValueError(\n- \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n- )\n-\n- batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n- if len(batch_sizes) > 1:\n- logger.info(\n- f\"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \\\n- {deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n- )\n-\n- self.deepspeed_config[\"train_batch_size\"] = (\n- batch_size_per_device * deepspeed_plugin.gradient_accumulation_steps * self.num_processes\n- )\n-\n result = [\n self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\n for obj in args\n ]\n \n+ if deepspeed_plugin.config_file == \"none\":\n+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ if self.split_batches:\n+ batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\n+ if len(batch_sizes) == 0:\n+ raise ValueError(\n+ \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ )\n+\n+ batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n+ if len(batch_sizes) > 1:\n+ logger.info(\n+ \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\n+ f\"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n+ )\n+\n+ self.deepspeed_config[\"train_batch_size\"] = (\n+ batch_size_per_device * self.deepspeed_config[\"gradient_accumulation_steps\"] * self.num_processes\n+ )\n+\n model = None\n optimizer = None\n+ scheduler = None\n for obj in result:\n if isinstance(obj, torch.nn.Module):\n model = obj\n- elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)):\n optimizer = obj\n+ elif (isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\n+ type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\n+ ):\n+ scheduler = obj\n \n- if deepspeed_plugin.auto_opt_mapping:\n- is_adam = isinstance(optimizer, torch.optim.Adam)\n- is_adamw = isinstance(optimizer, torch.optim.AdamW)\n- if (is_adam or is_adamw) and deepspeed_plugin.offload_optimizer_device == \"cpu\":\n- defaults = optimizer.defaults\n- params = []\n- for group in optimizer.param_groups:\n- params.extend(group[\"params\"])\n-\n- optimizer = deepspeed.ops.adam.DeepSpeedCPUAdam(\n- params,\n- lr=defaults[\"lr\"],\n- bias_correction=True,\n- betas=defaults[\"betas\"],\n- eps=defaults[\"eps\"],\n- weight_decay=defaults[\"weight_decay\"],\n- amsgrad=defaults[\"amsgrad\"],\n- adamw_mode=is_adamw,\n- )\n+ if \"optimizer\" in self.deepspeed_config and not isinstance(optimizer, (DummyOptim)):\n+ raise ValueError(\n+ \"You cannot specify an optimizer in the config file and in the code at the same time. 
\"\n+ \"Please remove the optimizer from the config file or \"\n+ \"create `accelerate.utils.DummyOptim` in the code.\"\n+ )\n+ elif \"optimizer\" not in self.deepspeed_config and isinstance(optimizer, (DummyOptim)):\n+ raise ValueError(\"You cannot create a `DummyOptim` without specifying an optimizer in the config file.\")\n \n- # useful when only eval_dataloader is given into `accelerator.prepare()`\n- if model is not None:\n- engine = DeepSpeedEngineWrapper(\n- args=None,\n- model=model,\n- optimizer=optimizer,\n- config_params=self.deepspeed_config,\n- dist_init_required=False,\n+ if isinstance(optimizer, (torch.optim.Optimizer)):\n+ self.deepspeed_config[\"zero_allow_untested_optimizer\"] = True\n+\n+ if \"scheduler\" in self.deepspeed_config and not isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\n+ \"You cannot specify a scheduler in the config file and in the code at the same time. \"\n+ \"Please remove the scheduler from the config file or \"\n+ \"create `accelerate.utils.DummyScheduler` in the code.\"\n+ )\n+ elif \"scheduler\" not in self.deepspeed_config and isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\"You cannot create a `DummyScheduler` without specifying a scheduler in the config file.\")\n+\n+ if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\n+ \"You can only specify `accelerate.utils.DummyScheduler` in the code when using \"\n+ \"`accelerate.utils.DummyOptim`.\"\n )\n+\n+ if model is not None:\n+ model_parameters = None\n+ if isinstance(optimizer, (DummyOptim)):\n+ model_parameters = optimizer.param_groups\n+ kwargs = dict(model=model, model_parameters=model_parameters, config_params=self.deepspeed_config)\n+ else:\n+ kwargs = dict(model=model, optimizer=optimizer, config_params=self.deepspeed_config)\n+ if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:\n+ kwargs[\"lr_scheduler\"] = scheduler\n+\n+ engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)\n+ optimizer = DeepSpeedOptimizerWrapper(optimizer)\n+ if lr_scheduler is None:\n+ scheduler = AcceleratedScheduler(\n+ scheduler,\n+ optimizer,\n+ step_with_optimizer=self.step_scheduler_with_optimizer,\n+ split_batches=self.split_batches,\n+ )\n+ else:\n+ scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer)\n+\n for i in range(len(result)):\n if isinstance(result[i], torch.nn.Module):\n result[i] = engine\n- elif isinstance(result[i], torch.optim.Optimizer):\n- result[i] = DeepSpeedOptimizerWrapper(engine.optimizer, engine)\n- self.deepspeed_engine = engine # pointing for deepspeed_engine.backward()\n+ elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)):\n+ result[i] = optimizer\n+ elif (isinstance(result[i], (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\n+ type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\n+ ):\n+ result[i] = scheduler\n+ self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(\n+ engine\n+ ) # pointing for deepspeed_engine_wrapped.backward()", "from_author": false }, { "body": "Let's take this PR as an opportunity to remove those assers and use proper exceptions :-)", "diff_hunk": "@@ -499,81 +527,116 @@ def _prepare_deepspeed(self, *args):\n deepspeed_plugin = self.state.deepspeed_plugin\n self.deepspeed_config = deepspeed_plugin.deepspeed_config\n \n- batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n- if len(batch_sizes) == 0:\n- raise ValueError(\n- \"You must specify a 
training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n- )\n-\n- batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n- if len(batch_sizes) > 1:\n- logger.info(\n- f\"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \\\n- {deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n- )\n-\n- self.deepspeed_config[\"train_batch_size\"] = (\n- batch_size_per_device * deepspeed_plugin.gradient_accumulation_steps * self.num_processes\n- )\n-\n result = [\n self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\n for obj in args\n ]\n \n+ if deepspeed_plugin.config_file == \"none\":\n+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ if self.split_batches:\n+ batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]\n+ if len(batch_sizes) == 0:\n+ raise ValueError(\n+ \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ )\n+\n+ batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n+ if len(batch_sizes) > 1:\n+ logger.info(\n+ \"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \"\n+ f\"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n+ )\n+\n+ self.deepspeed_config[\"train_batch_size\"] = (\n+ batch_size_per_device * self.deepspeed_config[\"gradient_accumulation_steps\"] * self.num_processes\n+ )\n+\n model = None\n optimizer = None\n+ scheduler = None\n for obj in result:\n if isinstance(obj, torch.nn.Module):\n model = obj\n- elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)):\n optimizer = obj\n+ elif (isinstance(obj, (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\n+ type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\n+ ):\n+ scheduler = obj\n \n- if deepspeed_plugin.auto_opt_mapping:\n- is_adam = isinstance(optimizer, torch.optim.Adam)\n- is_adamw = isinstance(optimizer, torch.optim.AdamW)\n- if (is_adam or is_adamw) and deepspeed_plugin.offload_optimizer_device == \"cpu\":\n- defaults = optimizer.defaults\n- params = []\n- for group in optimizer.param_groups:\n- params.extend(group[\"params\"])\n-\n- optimizer = deepspeed.ops.adam.DeepSpeedCPUAdam(\n- params,\n- lr=defaults[\"lr\"],\n- bias_correction=True,\n- betas=defaults[\"betas\"],\n- eps=defaults[\"eps\"],\n- weight_decay=defaults[\"weight_decay\"],\n- amsgrad=defaults[\"amsgrad\"],\n- adamw_mode=is_adamw,\n- )\n+ if \"optimizer\" in self.deepspeed_config and not isinstance(optimizer, (DummyOptim)):\n+ raise ValueError(\n+ \"You cannot specify an optimizer in the config file and in the code at the same time. 
\"\n+ \"Please remove the optimizer from the config file or \"\n+ \"create `accelerate.utils.DummyOptim` in the code.\"\n+ )\n+ elif \"optimizer\" not in self.deepspeed_config and isinstance(optimizer, (DummyOptim)):\n+ raise ValueError(\"You cannot create a `DummyOptim` without specifying an optimizer in the config file.\")\n \n- # useful when only eval_dataloader is given into `accelerator.prepare()`\n- if model is not None:\n- engine = DeepSpeedEngineWrapper(\n- args=None,\n- model=model,\n- optimizer=optimizer,\n- config_params=self.deepspeed_config,\n- dist_init_required=False,\n+ if isinstance(optimizer, (torch.optim.Optimizer)):\n+ self.deepspeed_config[\"zero_allow_untested_optimizer\"] = True\n+\n+ if \"scheduler\" in self.deepspeed_config and not isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\n+ \"You cannot specify a scheduler in the config file and in the code at the same time. \"\n+ \"Please remove the scheduler from the config file or \"\n+ \"create `accelerate.utils.DummyScheduler` in the code.\"\n+ )\n+ elif \"scheduler\" not in self.deepspeed_config and isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\"You cannot create a `DummyScheduler` without specifying a scheduler in the config file.\")\n+\n+ if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)):\n+ raise ValueError(\n+ \"You can only specify `accelerate.utils.DummyScheduler` in the code when using \"\n+ \"`accelerate.utils.DummyOptim`.\"\n )\n+\n+ if model is not None:\n+ model_parameters = None\n+ if isinstance(optimizer, (DummyOptim)):\n+ model_parameters = optimizer.param_groups\n+ kwargs = dict(model=model, model_parameters=model_parameters, config_params=self.deepspeed_config)\n+ else:\n+ kwargs = dict(model=model, optimizer=optimizer, config_params=self.deepspeed_config)\n+ if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:\n+ kwargs[\"lr_scheduler\"] = scheduler\n+\n+ engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)\n+ optimizer = DeepSpeedOptimizerWrapper(optimizer)\n+ if lr_scheduler is None:\n+ scheduler = AcceleratedScheduler(\n+ scheduler,\n+ optimizer,\n+ step_with_optimizer=self.step_scheduler_with_optimizer,\n+ split_batches=self.split_batches,\n+ )\n+ else:\n+ scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer)\n+\n for i in range(len(result)):\n if isinstance(result[i], torch.nn.Module):\n result[i] = engine\n- elif isinstance(result[i], torch.optim.Optimizer):\n- result[i] = DeepSpeedOptimizerWrapper(engine.optimizer, engine)\n- self.deepspeed_engine = engine # pointing for deepspeed_engine.backward()\n+ elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)):\n+ result[i] = optimizer\n+ elif (isinstance(result[i], (torch.optim.lr_scheduler._LRScheduler, DummyScheduler))) or (\n+ type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES\n+ ):\n+ result[i] = scheduler\n+ self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(\n+ engine\n+ ) # pointing for deepspeed_engine_wrapped.backward()\n self._models.append(engine)\n- self._optimizers.append(engine.optimizer)\n+ self._optimizers.append(optimizer)\n+ self._schedulers.append(scheduler)\n assert (\n len(self._models) == 1\n ), \"You can't use same `Accelerator()` instance with 2 models when using DeepSpeed\"\n \n if self.distributed_type == DistributedType.DEEPSPEED:\n assert hasattr(\n- self, \"deepspeed_engine\"\n- ), \"You need to pass the model along the optimizer when using Deepspeed.\"\n+ self, 
\"deepspeed_engine_wrapped\"\n+ ), \"You need to pass the model along with the optimizer when using Deepspeed.\"", "from_author": false }, { "body": "```suggestion\r\n \"Do you want to specify a json file to a DeepSpeed config? [yes/NO]: \",\r\n```", "diff_hunk": "@@ -77,24 +77,72 @@ def get_cluster_input():\n ), \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source\"\n \n if distributed_type == DistributedType.DEEPSPEED:\n- deepspeed_config[\"zero_stage\"] = _ask_field(\n- \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\n- lambda x: int(x),\n- default=2,\n+ use_deepspeed_config = _ask_field(\n+ \"Do you want to specify DeepSpeed json config filepath? [yes/NO]: \",", "from_author": false }, { "body": "```suggestion\r\n \"Please enter the path to the json DeepSpeed config file: \",\r\n```", "diff_hunk": "@@ -77,24 +77,72 @@ def get_cluster_input():\n ), \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source\"\n \n if distributed_type == DistributedType.DEEPSPEED:\n- deepspeed_config[\"zero_stage\"] = _ask_field(\n- \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\n- lambda x: int(x),\n- default=2,\n+ use_deepspeed_config = _ask_field(\n+ \"Do you want to specify DeepSpeed json config filepath? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n )\n-\n- if deepspeed_config[\"zero_stage\"] >= 2:\n- deepspeed_config[\"offload_optimizer_device\"] = _ask_field(\n- \"Where to offload optimizer states? [NONE/cpu/nvme]: \",\n+ if use_deepspeed_config:\n+ deepspeed_config[\"deepspeed_config_file\"] = _ask_field(\n+ \"Please enter filepath of the DeepSpeed json config file: \",", "from_author": false }, { "body": "Maybe put a comment to explain DeepSpeed will handle this?", "diff_hunk": "@@ -648,13 +711,17 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n if parameters == [p for p in model.parameters()]:\n model.clip_grad_norm_(max_norm, norm_type)\n return\n+ elif self.distributed_type == DistributedType.DEEPSPEED:\n+ return", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -648,13 +711,17 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n if parameters == [p for p in model.parameters()]:\n model.clip_grad_norm_(max_norm, norm_type)\n return\n+ elif self.distributed_type == DistributedType.DEEPSPEED:\n+ return\n self.unscale_gradients()\n torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\n \n def clip_grad_value_(self, parameters, clip_value):\n \"\"\"\n Should be used in place of `torch.nn.utils.clip_grad_value_`.\n \"\"\"\n+ if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]:\n+ return", "from_author": false }, { "body": "Not really sure it's worth adding all of those below. The DeepSpeed config is enough IMO.", "diff_hunk": "@@ -57,6 +58,56 @@ def launch_command_parser(subparsers=None):\n action=\"store_true\",\n help=\"Whether to use deepspeed.\",\n )\n+ parser.add_argument(\n+ \"--deepspeed_config_file\",\n+ default=None,\n+ type=str,\n+ help=\"DeepSpeed's config file (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+ parser.add_argument(", "from_author": false }, { "body": "Not sure I understand the comments. Is it everything that happens in this step? 
If that's the case, can you add a comment stating it at the top?", "diff_hunk": "@@ -12,58 +12,26 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from ..optimizer import AcceleratedOptimizer\n-from .imports import is_apex_available, is_deepspeed_available\n-\n-\n-if is_deepspeed_available():\n- from deepspeed import DeepSpeedEngine\n-\n-if is_apex_available():\n- from apex import amp\n-\n-\n-class DeepSpeedEngineWrapper(DeepSpeedEngine):\n- \"\"\"\n- Wrapper over deepspeed.DeepSpeedEngine object\n- \"\"\"\n-\n- def __init__(self, *args, **kwargs):\n- super().__init__(*args, **kwargs)\n+from accelerate.scheduler import AcceleratedScheduler\n \n- # overwriting micro_steps for user's gradient_accumulation\n- self.micro_steps = -1\n+from ..optimizer import AcceleratedOptimizer\n \n- def step(self, lr_kwargs=None):\n- \"\"\"DeepSpeedEngine.step() without `micro_steps` update & no profiling\"\"\"\n- if self.is_gradient_accumulation_boundary(): # it shouldn't matter whether we keep this line or not\n- if self.progressive_layer_drop:\n- self.progressive_layer_drop.update_state(self.global_steps)\n \n- self._take_model_step(lr_kwargs)\n+class DeepSpeedEngineWrapper:\n+ def __init__(self, engine):\n+ self.engine = engine\n \n def backward(self, loss):\n- \"\"\"DeepSpeedEngine.backward() with with no loss scaling; no profiling but with `micro_steps` update\"\"\"\n-\n- if self.zero_optimization():\n- self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()\n- self.optimizer.backward(loss)\n- elif self.amp_enabled():\n- # AMP requires delaying unscale when inside gradient accumulation boundaries\n- # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n- delay_unscale = not self.is_gradient_accumulation_boundary()\n- with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss:\n- scaled_loss.backward()\n- elif self.fp16_enabled():\n- self.optimizer.backward(loss)\n- else:\n- loss.backward()\n-\n- if self.enable_backward_allreduce:\n- self.allreduce_gradients()\n+ # runs backpropagation and handles mixed precision\n+ self.engine.backward(loss)\n \n- # this will ensure deepspeed gradient_accumulation matches user's accumulation\n- self.micro_steps += 1\n+ # gradient accumulation check\n+ # gradient clipping\n+ # optimizer step\n+ # zero grad\n+ # checking overflow\n+ # lr_scheduler step\n+ self.engine.step()", "from_author": false }, { "body": "```suggestion\r\n self.ds_config_dict = dict(zero2=config_zero2, zero3=config_zero3)\r\n```\r\nFits in one line ;-)", "diff_hunk": "@@ -0,0 +1,546 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import inspect\n+import io\n+import itertools\n+import json\n+import os\n+import tempfile\n+import unittest\n+from copy import deepcopy\n+from pathlib import Path\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate.accelerator import Accelerator\n+from accelerate.scheduler import AcceleratedScheduler\n+from accelerate.state import AcceleratorState\n+from accelerate.test_utils.testing import require_cuda, require_deepspeed\n+from accelerate.test_utils.training import RegressionDataset\n+from accelerate.utils.dataclasses import DeepSpeedPlugin\n+from accelerate.utils.deepspeed import (\n+ DeepSpeedEngineWrapper,\n+ DeepSpeedOptimizerWrapper,\n+ DeepSpeedSchedulerWrapper,\n+ DummyOptim,\n+ DummyScheduler,\n+)\n+from parameterized import parameterized\n+from transformers import AutoModel, AutoModelForCausalLM, get_scheduler\n+from transformers.deepspeed import HfDeepSpeedConfig\n+from transformers.testing_utils import mockenv_context\n+from transformers.trainer_utils import set_seed\n+from transformers.utils import is_torch_bf16_available\n+\n+\n+set_seed(42)\n+\n+T5_SMALL = \"t5-small\"\n+T5_TINY = \"patrickvonplaten/t5-tiny-random\"\n+GPT2_TINY = \"sshleifer/tiny-gpt2\"\n+\n+ZERO2 = \"zero2\"\n+ZERO3 = \"zero3\"\n+\n+FP16 = \"fp16\"\n+BF16 = \"bf16\"\n+\n+CUSTOM_OPTIMIZER = \"custom_optimizer\"\n+CUSTOM_SCHEDULER = \"custom_scheduler\"\n+DS_OPTIMIZER = \"deepspeed_optimizer\"\n+DS_SCHEDULER = \"deepspeed_scheduler\"\n+\n+stages = [ZERO2, ZERO3]\n+optims = [CUSTOM_OPTIMIZER, DS_OPTIMIZER]\n+schedulers = [CUSTOM_SCHEDULER, DS_SCHEDULER]\n+if is_torch_bf16_available():\n+ dtypes = [FP16, BF16]\n+else:\n+ dtypes = [FP16]\n+\n+\n+def parameterized_custom_name_func(func, param_num, param):\n+ # customize the test name generator function as we want both params to appear in the sub-test\n+ # name, as by default it shows only the first param\n+ param_based_name = parameterized.to_safe_name(\"_\".join(str(x) for x in param.args))\n+ return f\"{func.__name__}_{param_based_name}\"\n+\n+\n+# Cartesian-product of zero stages with models to test\n+params = list(itertools.product(stages, dtypes))\n+optim_scheduler_params = list(itertools.product(optims, schedulers))\n+\n+\n+@require_deepspeed\n+@require_cuda\n+class DeepSpeedConfigIntegration(unittest.TestCase):\n+ def setUp(self):\n+ super().setUp()\n+\n+ self._test_file_path = inspect.getfile(self.__class__)\n+ path = Path(self._test_file_path).resolve()\n+ self.test_file_dir_str = str(path.parents[0])\n+\n+ self.ds_config_file = dict(\n+ zero2=f\"{self.test_file_dir_str}/ds_config_zero2.json\",\n+ zero3=f\"{self.test_file_dir_str}/ds_config_zero3.json\",\n+ )\n+\n+ # use self.get_config_dict(stage) to use these to ensure the original is not modified\n+ with io.open(self.ds_config_file[ZERO2], \"r\", encoding=\"utf-8\") as f:\n+ config_zero2 = json.load(f)\n+ with io.open(self.ds_config_file[ZERO3], \"r\", encoding=\"utf-8\") as f:\n+ 
config_zero3 = json.load(f)\n+ # The following setting slows things down, so don't enable it by default unless needed by a test.\n+ # It's in the file as a demo for users since we want everything to work out of the box even if slower.\n+ config_zero3[\"zero_optimization\"][\"stage3_gather_16bit_weights_on_model_save\"] = False\n+\n+ self.ds_config_dict = dict(\n+ zero2=config_zero2,\n+ zero3=config_zero3,\n+ )", "from_author": false }, { "body": "I thought about this. The thing is, a weakref to `HFDeepSpeedConfig` is created in that file (transformers.deepspeed). This is important only when using ZeRO Stage-3 when we don't want to load Transformer models fully on CPU/GPU and we want to directly partition the model parameters across GPUs. This weakref `_hf_deepspeed_config_weak_ref` is used in transformers `modeling_utils.py` to check if deepspeed zero stage3 is enabled. If it is enabled DeepSpeed functionality of `zero.init` is used to directly partition model parameters across GPUs. It is used by `from_config` (when training from scratch) and `from_pretrained` (when finetuning) methods. \r\n\r\nSnippet in `modeling_utils.py`:\r\n```python\r\nif is_deepspeed_zero3_enabled():\r\n import deepspeed\r\n \r\n logger.info(\"Detected DeepSpeed ZeRO-3: activating zero.init() for this model\")\r\n # this immediately partitions the model across all gpus, to avoid the overhead in time\r\n # and memory copying it on CPU or each GPU first\r\n with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()):\r\n model = cls(config, **kwargs)\r\nelse:\r\n model = cls(config, **kwargs)\r\n```\r\n`is_deepspeed_zero3_enabled` from above snippet directly refers to the weakref in transformers.deepspeed\r\n```python\r\ndef is_deepspeed_zero3_enabled():\r\n if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:\r\n return _hf_deepspeed_config_weak_ref().is_zero3()\r\n else:\r\n return False\r\n```\r\n\r\nDue to above reasons I thought it would be good to let this be part of `transformers` repo as it is specifically used only in ZeRO Stage-3 for **efficiently loading models that are part of `transformers` repo.**\r\n", "diff_hunk": "@@ -163,6 +172,25 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n+ if deepspeed_plugin:\n+ if not is_deepspeed_available():\n+ raise ImportError(\n+ \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\"\n+ )\n+ if compare_versions(\"deepspeed\", \"<\", \"0.6.4\"):\n+ raise ImportError(\"DeepSpeed version must be >= 0.6.4. Please update DeepSpeed.\")\n+ if os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\" or deepspeed_plugin.zero3_init_flag:\n+ if not is_transformers_available():\n+ raise Exception(\n+ \"When `zero3_init_flag` is set, it requires Transformers to be installed. \"\n+ \"Please run `pip3 install transformers`.\"\n+ )\n+ from transformers.deepspeed import HfDeepSpeedConfig", "from_author": true }, { "body": "With the move, the weakref will disappear and we will rely on the `AcceleratorState` to know if zero3 is enabled inside Transformers. 
Again, not sure when is the right point to do the move (as it will make Accelerate a hard dep of Transformers) but want to flag this is the final destination :-)", "diff_hunk": "@@ -163,6 +172,25 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n+ if deepspeed_plugin:\n+ if not is_deepspeed_available():\n+ raise ImportError(\n+ \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\"\n+ )\n+ if compare_versions(\"deepspeed\", \"<\", \"0.6.4\"):\n+ raise ImportError(\"DeepSpeed version must be >= 0.6.4. Please update DeepSpeed.\")\n+ if os.environ.get(\"DEEPSPEED_ZERO3_INIT\", \"false\") == \"true\" or deepspeed_plugin.zero3_init_flag:\n+ if not is_transformers_available():\n+ raise Exception(\n+ \"When `zero3_init_flag` is set, it requires Transformers to be installed. \"\n+ \"Please run `pip3 install transformers`.\"\n+ )\n+ from transformers.deepspeed import HfDeepSpeedConfig", "from_author": false }, { "body": "I would focus the intro on Zero and offload which is the main feature DeepSpeed offers. Distributed training, mixed precision are done by Accelerate, not really specific to DeepSpeed ;-)", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. \n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.", "from_author": false }, { "body": "A picture here or before to explain the differences from ZeRO stages would be great. Same, quickly explaining what the offload is doing would be great (here or above). Also, for me `ZeRO-Infinity` is the NVME offload (that's how they released it at least).\r\n\r\nI'd focus on non-DeepSpeed marketing terms such a CPU offload and Disk offload (the disk should be an NVME for decent speed but it technically work on any Disk).", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. \n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).", "from_author": false }, { "body": "I didn't realize this meant via the `accelerate config` command, so I'd say it epxilcitly:\r\n```suggestion\r\nWe will first look at easy to use integration via `accelerate config`. \r\n```", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. 
\n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at easy to use integration via `deepspeed_plugin`. ", "from_author": false }, { "body": "```suggestion\r\nand answer the questions asked. It will ask whether you want to use a config file for DeepSpeed to which you should answer no. Then answer the following questions to generate a basic DeepSpeed config.\r\n```", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. \n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. 
This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at easy to use integration via `deepspeed_plugin`. \n+Followed by more flexible and feature rich `deepspeed config file` integration. \n+\n+### Accelerate DeepSpeed Plugin\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer no and answer the following questions. ", "from_author": false }, { "body": "It would be better to launch a basic example here.", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. \n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. 
\n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at easy to use integration via `deepspeed_plugin`. \n+Followed by more flexible and feature rich `deepspeed config file` integration. \n+\n+### Accelerate DeepSpeed Plugin\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer no and answer the following questions. \n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Plugin:\n+\n+**ZeRO Stage-2 DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: none\n+ offload_param_device: none\n+ zero3_init_flag: true\n+ zero_stage: 2\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. 
\n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at easy to use integration via `deepspeed_plugin`. \n+Followed by more flexible and feature rich `deepspeed config file` integration. \n+\n+### Accelerate DeepSpeed Plugin\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer no and answer the following questions. 
\n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Plugin:\n+\n+**ZeRO Stage-2 DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: none\n+ offload_param_device: none\n+ zero3_init_flag: true\n+ zero_stage: 2\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage2_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 24 \\\n+--per_device_eval_batch_size 24 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: cpu\n+ offload_param_device: cpu\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\", "from_author": false }, { "body": "Segway to the next section here: \"To be able to tweak more options, you will need to use a DeepSpeed config file.\"", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. 
\n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at easy to use integration via `deepspeed_plugin`. \n+Followed by more flexible and feature rich `deepspeed config file` integration. \n+\n+### Accelerate DeepSpeed Plugin\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer no and answer the following questions. 
\n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Plugin:\n+\n+**ZeRO Stage-2 DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: none\n+ offload_param_device: none\n+ zero3_init_flag: true\n+ zero_stage: 2\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage2_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 24 \\\n+--per_device_eval_batch_size 24 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: cpu\n+ offload_param_device: cpu\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage3_offload_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 32 \\\n+--per_device_eval_batch_size 32 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning\n+`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.\n+`gradient_clipping`: Enable gradient clipping with value.\n+`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.\n+`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.\n+`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. 
Only applicable with ZeRO Stage-3.\n+`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.\n+`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training. \n+```\n+", "from_author": false }, { "body": "```suggestion\r\n # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer\r\n```", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. \n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at easy to use integration via `deepspeed_plugin`. \n+Followed by more flexible and feature rich `deepspeed config file` integration. \n+\n+### Accelerate DeepSpeed Plugin\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. 
It will ask whether you want to use a config file for deepspeed to which you answer no and answer the following questions. \n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Plugin:\n+\n+**ZeRO Stage-2 DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: none\n+ offload_param_device: none\n+ zero3_init_flag: true\n+ zero_stage: 2\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage2_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 24 \\\n+--per_device_eval_batch_size 24 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: cpu\n+ offload_param_device: cpu\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage3_offload_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 32 \\\n+--per_device_eval_batch_size 32 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning\n+`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.\n+`gradient_clipping`: Enable gradient clipping with value.\n+`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.\n+`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.\n+`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. 
Only applicable with ZeRO Stage-3.\n+`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.\n+`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training. \n+```\n+\n+### DeepSpeed Config File\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer yes \n+and provide the path to the deepspeed config file. \n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Config File:\n+\n+**ZeRO Stage-2 DeepSpeed Config File Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+with the contents of `zero_stage2_config.json` being:\n+```json\n+{\n+ \"fp16\": {\n+ \"enabled\": \"auto\",\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\",\n+ \"torch_adam\": true,\n+ \"adam_w_mode\": true\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 2,\n+ \"allgather_partitions\": true,\n+ \"allgather_bucket_size\": 2e8,\n+ \"overlap_comm\": true,\n+ \"reduce_scatter\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"contiguous_gradients\": true\n+ },\n+ \"gradient_accumulation_steps\": \"auto\",\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage2_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 24 \\\n+--per_device_eval_batch_size 24 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**ZeRO Stage-3 with CPU offload DeepSpeed Config File Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: 
false\n+```\n+with the contents of `zero_stage3_offload_config.json` being:\n+```json\n+{\n+ \"fp16\": {\n+ \"enabled\": \"auto\",\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\"\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"offload_optimizer\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"offload_param\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"overlap_comm\": true,\n+ \"contiguous_gradients\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"stage3_prefetch_bucket_size\": \"auto\",\n+ \"stage3_param_persistence_threshold\": \"auto\",\n+ \"sub_group_size\": 1e9,\n+ \"stage3_max_live_parameters\": 1e9,\n+ \"stage3_max_reuse_distance\": 1e9,\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\"\n+ },\n+ \"gradient_accumulation_steps\": \"auto\",\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage3_offload_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 32 \\\n+--per_device_eval_batch_size 32 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**Important code changes when using DeepSpeed Config File**\n+\n+1. DeepSpeed Optimizers and Schedulers. For more information on these, \n+see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation.\n+We will look at the changes needed in the code when using these.\n+ \n+ a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys present in the DeepSpeed config file.\n+ In this situation, those will be used and user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code.\n+ Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:\n+ ```python\n+ # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer\n+ if \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config:\n+ optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n+ else:\n+ optimizer = DummyOptim(param_groups=optimizer_grouped_parameters)\n+\n+ # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer", "from_author": false }, { "body": "This is a huge block, I'd document less auto option plus make sure we automatically set the ones we can!", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. \n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at easy to use integration via `deepspeed_plugin`. \n+Followed by more flexible and feature rich `deepspeed config file` integration. \n+\n+### Accelerate DeepSpeed Plugin\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer no and answer the following questions. 
\n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Plugin:\n+\n+**ZeRO Stage-2 DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: none\n+ offload_param_device: none\n+ zero3_init_flag: true\n+ zero_stage: 2\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage2_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 24 \\\n+--per_device_eval_batch_size 24 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: cpu\n+ offload_param_device: cpu\n+ zero3_init_flag: true\n+ zero3_save_16bit_model: true\n+ zero_stage: 3\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage3_offload_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 32 \\\n+--per_device_eval_batch_size 32 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning\n+`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.\n+`gradient_clipping`: Enable gradient clipping with value.\n+`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.\n+`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.\n+`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. 
Only applicable with ZeRO Stage-3.\n+`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.\n+`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training. \n+```\n+\n+### DeepSpeed Config File\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer yes \n+and provide the path to the deepspeed config file. \n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Config File:\n+\n+**ZeRO Stage-2 DeepSpeed Config File Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+with the contents of `zero_stage2_config.json` being:\n+```json\n+{\n+ \"fp16\": {\n+ \"enabled\": \"auto\",\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\",\n+ \"torch_adam\": true,\n+ \"adam_w_mode\": true\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 2,\n+ \"allgather_partitions\": true,\n+ \"allgather_bucket_size\": 2e8,\n+ \"overlap_comm\": true,\n+ \"reduce_scatter\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"contiguous_gradients\": true\n+ },\n+ \"gradient_accumulation_steps\": \"auto\",\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage2_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 24 \\\n+--per_device_eval_batch_size 24 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**ZeRO Stage-3 with CPU offload DeepSpeed Config File Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json\n+ zero3_init_flag: true\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: 
false\n+```\n+with the contents of `zero_stage3_offload_config.json` being:\n+```json\n+{\n+ \"fp16\": {\n+ \"enabled\": \"auto\",\n+ \"loss_scale\": 0,\n+ \"loss_scale_window\": 1000,\n+ \"initial_scale_power\": 16,\n+ \"hysteresis\": 2,\n+ \"min_loss_scale\": 1\n+ },\n+ \"optimizer\": {\n+ \"type\": \"AdamW\",\n+ \"params\": {\n+ \"lr\": \"auto\",\n+ \"weight_decay\": \"auto\"\n+ }\n+ },\n+ \"scheduler\": {\n+ \"type\": \"WarmupDecayLR\",\n+ \"params\": {\n+ \"warmup_min_lr\": \"auto\",\n+ \"warmup_max_lr\": \"auto\",\n+ \"warmup_num_steps\": \"auto\",\n+ \"total_num_steps\": \"auto\"\n+ }\n+ },\n+ \"zero_optimization\": {\n+ \"stage\": 3,\n+ \"offload_optimizer\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"offload_param\": {\n+ \"device\": \"cpu\",\n+ \"pin_memory\": true\n+ },\n+ \"overlap_comm\": true,\n+ \"contiguous_gradients\": true,\n+ \"reduce_bucket_size\": \"auto\",\n+ \"stage3_prefetch_bucket_size\": \"auto\",\n+ \"stage3_param_persistence_threshold\": \"auto\",\n+ \"sub_group_size\": 1e9,\n+ \"stage3_max_live_parameters\": 1e9,\n+ \"stage3_max_reuse_distance\": 1e9,\n+ \"stage3_gather_16bit_weights_on_model_save\": \"auto\"\n+ },\n+ \"gradient_accumulation_steps\": \"auto\",\n+ \"gradient_clipping\": \"auto\",\n+ \"steps_per_print\": 2000,\n+ \"train_batch_size\": \"auto\",\n+ \"train_micro_batch_size_per_gpu\": \"auto\",\n+ \"wall_clock_breakdown\": false\n+}\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\\n+--config_name \"gpt2-large\" \\\n+--tokenizer_name \"gpt2-large\" \\\n+--dataset_name \"wikitext\" \\\n+--dataset_config_name \"wikitext-2-raw-v1\" \\\n+--block_size 128 \\\n+--output_dir \"./clm/clm_deepspeed_stage3_offload_accelerate\" \\\n+--learning_rate 5e-4 \\\n+--per_device_train_batch_size 32 \\\n+--per_device_eval_batch_size 32 \\\n+--num_train_epochs 3 \\\n+--with_tracking \\\n+--report_to \"wandb\"\\\n+```\n+\n+**Important code changes when using DeepSpeed Config File**\n+\n+1. DeepSpeed Optimizers and Schedulers. For more information on these, \n+see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation.\n+We will look at the changes needed in the code when using these.\n+ \n+ a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys present in the DeepSpeed config file.\n+ In this situation, those will be used and user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code.\n+ Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:\n+ ```python\n+ # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer\n+ if \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config:\n+ optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n+ else:\n+ optimizer = DummyOptim(param_groups=optimizer_grouped_parameters)\n+\n+ # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer\n+ if \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config:\n+ lr_scheduler = get_scheduler(\n+ name=args.lr_scheduler_type,\n+ optimizer=optimizer,\n+ num_warmup_steps=args.num_warmup_steps,\n+ num_training_steps=args.max_train_steps,\n+ )\n+ else:\n+ lr_scheduler = DummyScheduler()\n+ ```\n+ b. 
Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\n+ In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin.\n+ In the above example we can see that the code reamins unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.\n+\n+ c. Custom Optim + DS Scheduler: The case when only `scheduler` key is present in the DeepSpeed config file. \n+ In this situation, user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code. \n+\n+ d. DS Optim + Custom Scheduler: The case when only `optimizer` key is present in the DeepSpeed config file. \n+ This will result in an error because one can only use DS Scheduler when using DS Optim.\n+\n+2. Notice the `auto` values in the above example DeepSpeed config files. \n+These have to specified by the user in the code by running `accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)`. \n+This allows user more flexibilty to fill these fields based on the code instead of hardcoding them. \n+Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing how to fill these: \n+```python", "from_author": false }, { "body": "```suggestion\r\ndef checkpoint_model(checkpoint_folder, ckpt_id, model, epoch, last_global_step, **kwargs):\r\n```\r\nAll-caps name are for constants, and here this is not a constant :-)", "diff_hunk": "@@ -0,0 +1,753 @@\n+#!/usr/bin/env python\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\n+on a text file or a dataset without using HuggingFace Trainer.\n+\n+Here is the full list of checkpoints on the hub that can be fine-tuned by this script:\n+https://huggingface.co/models?filter=text-generation\n+\"\"\"\n+# You can also adapt this script on your own causal language modeling task. 
Pointers for this are left as comments.\n+\n+import argparse\n+import json\n+import logging\n+import math\n+import os\n+import random\n+from itertools import chain\n+from pathlib import Path\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+import datasets\n+import transformers\n+from accelerate import Accelerator, DistributedType\n+from accelerate.logging import get_logger\n+from accelerate.utils import DummyOptim, DummyScheduler, set_seed\n+from datasets import load_dataset\n+from huggingface_hub import Repository\n+from tqdm.auto import tqdm\n+from transformers import (\n+ CONFIG_MAPPING,\n+ MODEL_MAPPING,\n+ AutoConfig,\n+ AutoModelForCausalLM,\n+ AutoTokenizer,\n+ SchedulerType,\n+ default_data_collator,\n+ get_scheduler,\n+)\n+from transformers.utils import get_full_repo_name\n+from transformers.utils.versions import require_version\n+\n+\n+logger = get_logger(__name__)\n+\n+require_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\")\n+\n+MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\n+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n+\n+\n+def parse_args():\n+ parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a causal language modeling task\")\n+ parser.add_argument(\n+ \"--dataset_name\",\n+ type=str,\n+ default=None,\n+ help=\"The name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--dataset_config_name\",\n+ type=str,\n+ default=None,\n+ help=\"The configuration name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_split_percentage\",\n+ default=5,\n+ help=\"The percentage of the train set used as validation set in case there's no validation split\",\n+ )\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=False,\n+ )\n+ parser.add_argument(\n+ \"--config_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained config name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--tokenizer_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained tokenizer name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--use_slow_tokenizer\",\n+ action=\"store_true\",\n+ help=\"If passed, will use a slow tokenizer (not backed by the πŸ€— Tokenizers library).\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_train_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the training dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_eval_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the evaluation dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--learning_rate\",\n+ type=float,\n+ default=5e-5,\n+ help=\"Initial learning rate (after the potential warmup period) to use.\",\n+ )\n+ parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n+ parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n+ parser.add_argument(\n+ \"--max_train_steps\",\n+ type=int,\n+ default=None,\n+ 
help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n+ )\n+ parser.add_argument(\n+ \"--gradient_accumulation_steps\",\n+ type=int,\n+ default=1,\n+ help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n+ )\n+ parser.add_argument(\n+ \"--lr_scheduler_type\",\n+ type=SchedulerType,\n+ default=\"linear\",\n+ help=\"The scheduler type to use.\",\n+ choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n+ )\n+ parser.add_argument(\n+ \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n+ )\n+ parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n+ parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n+ parser.add_argument(\n+ \"--model_type\",\n+ type=str,\n+ default=None,\n+ help=\"Model type to use if training from scratch.\",\n+ choices=MODEL_TYPES,\n+ )\n+ parser.add_argument(\n+ \"--block_size\",\n+ type=int,\n+ default=None,\n+ help=(\n+ \"Optional input sequence length after tokenization. The training dataset will be truncated in block of\"\n+ \" this size for training. Default to the model max input length for single sentence inputs (take into\"\n+ \" account special tokens).\"\n+ ),\n+ )\n+ parser.add_argument(\n+ \"--preprocessing_num_workers\",\n+ type=int,\n+ default=None,\n+ help=\"The number of processes to use for the preprocessing.\",\n+ )\n+ parser.add_argument(\n+ \"--overwrite_cache\", type=bool, default=False, help=\"Overwrite the cached training and evaluation sets\"\n+ )\n+ parser.add_argument(\n+ \"--no_keep_linebreaks\", action=\"store_true\", help=\"Do not keep line breaks when using TXT files.\"\n+ )\n+ parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\n+ parser.add_argument(\n+ \"--hub_model_id\", type=str, help=\"The name of the repository to keep in sync with the local `output_dir`.\"\n+ )\n+ parser.add_argument(\"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ # New Code #\n+ # Whether to load the best model at the end of training\n+ parser.add_argument(\n+ \"--load_best_model\",\n+ action=\"store_true\",\n+ help=\"Whether to load the best model at the end of training\",\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ action=\"store_true\",\n+ help=\"Whether to enable experiment trackers for logging.\",\n+ )\n+ parser.add_argument(\n+ \"--report_to\",\n+ type=str,\n+ default=\"all\",\n+ help=(\n+ 'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`,'\n+ ' `\"wandb\"` and `\"comet_ml\"`. 
Use `\"all\"` (default) to report to all integrations.'\n+ \"Only applicable when `--with_tracking` is passed.\"\n+ ),\n+ )\n+ args = parser.parse_args()\n+\n+ # Sanity checks\n+ if args.dataset_name is None and args.train_file is None and args.validation_file is None:\n+ raise ValueError(\"Need either a dataset name or a training/validation file.\")\n+ else:\n+ if args.train_file is not None:\n+ extension = args.train_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, json or txt file.\"\n+ if args.validation_file is not None:\n+ extension = args.validation_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, json or txt file.\"\n+\n+ if args.push_to_hub:\n+ assert args.output_dir is not None, \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n+\n+ return args\n+\n+\n+# New Code #\n+def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step, **kwargs):", "from_author": false }, { "body": "```suggestion\r\n success = model.save_checkpoint(checkpoint_folder, ckpt_id, checkpoint_state_dict)\r\n status_msg = f\"checkpointing: checkpoint_folder={checkpoint_folder}, ckpt_id={ckpt_id}\"\r\n```\r\nVariable name + f-strings only.", "diff_hunk": "@@ -0,0 +1,753 @@\n+#!/usr/bin/env python\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\n+on a text file or a dataset without using HuggingFace Trainer.\n+\n+Here is the full list of checkpoints on the hub that can be fine-tuned by this script:\n+https://huggingface.co/models?filter=text-generation\n+\"\"\"\n+# You can also adapt this script on your own causal language modeling task. 
Pointers for this are left as comments.\n+\n+import argparse\n+import json\n+import logging\n+import math\n+import os\n+import random\n+from itertools import chain\n+from pathlib import Path\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+import datasets\n+import transformers\n+from accelerate import Accelerator, DistributedType\n+from accelerate.logging import get_logger\n+from accelerate.utils import DummyOptim, DummyScheduler, set_seed\n+from datasets import load_dataset\n+from huggingface_hub import Repository\n+from tqdm.auto import tqdm\n+from transformers import (\n+ CONFIG_MAPPING,\n+ MODEL_MAPPING,\n+ AutoConfig,\n+ AutoModelForCausalLM,\n+ AutoTokenizer,\n+ SchedulerType,\n+ default_data_collator,\n+ get_scheduler,\n+)\n+from transformers.utils import get_full_repo_name\n+from transformers.utils.versions import require_version\n+\n+\n+logger = get_logger(__name__)\n+\n+require_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\")\n+\n+MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\n+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n+\n+\n+def parse_args():\n+ parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a causal language modeling task\")\n+ parser.add_argument(\n+ \"--dataset_name\",\n+ type=str,\n+ default=None,\n+ help=\"The name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--dataset_config_name\",\n+ type=str,\n+ default=None,\n+ help=\"The configuration name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_split_percentage\",\n+ default=5,\n+ help=\"The percentage of the train set used as validation set in case there's no validation split\",\n+ )\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=False,\n+ )\n+ parser.add_argument(\n+ \"--config_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained config name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--tokenizer_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained tokenizer name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--use_slow_tokenizer\",\n+ action=\"store_true\",\n+ help=\"If passed, will use a slow tokenizer (not backed by the πŸ€— Tokenizers library).\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_train_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the training dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_eval_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the evaluation dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--learning_rate\",\n+ type=float,\n+ default=5e-5,\n+ help=\"Initial learning rate (after the potential warmup period) to use.\",\n+ )\n+ parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n+ parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n+ parser.add_argument(\n+ \"--max_train_steps\",\n+ type=int,\n+ default=None,\n+ 
help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n+ )\n+ parser.add_argument(\n+ \"--gradient_accumulation_steps\",\n+ type=int,\n+ default=1,\n+ help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n+ )\n+ parser.add_argument(\n+ \"--lr_scheduler_type\",\n+ type=SchedulerType,\n+ default=\"linear\",\n+ help=\"The scheduler type to use.\",\n+ choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n+ )\n+ parser.add_argument(\n+ \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n+ )\n+ parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n+ parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n+ parser.add_argument(\n+ \"--model_type\",\n+ type=str,\n+ default=None,\n+ help=\"Model type to use if training from scratch.\",\n+ choices=MODEL_TYPES,\n+ )\n+ parser.add_argument(\n+ \"--block_size\",\n+ type=int,\n+ default=None,\n+ help=(\n+ \"Optional input sequence length after tokenization. The training dataset will be truncated in block of\"\n+ \" this size for training. Default to the model max input length for single sentence inputs (take into\"\n+ \" account special tokens).\"\n+ ),\n+ )\n+ parser.add_argument(\n+ \"--preprocessing_num_workers\",\n+ type=int,\n+ default=None,\n+ help=\"The number of processes to use for the preprocessing.\",\n+ )\n+ parser.add_argument(\n+ \"--overwrite_cache\", type=bool, default=False, help=\"Overwrite the cached training and evaluation sets\"\n+ )\n+ parser.add_argument(\n+ \"--no_keep_linebreaks\", action=\"store_true\", help=\"Do not keep line breaks when using TXT files.\"\n+ )\n+ parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\n+ parser.add_argument(\n+ \"--hub_model_id\", type=str, help=\"The name of the repository to keep in sync with the local `output_dir`.\"\n+ )\n+ parser.add_argument(\"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ # New Code #\n+ # Whether to load the best model at the end of training\n+ parser.add_argument(\n+ \"--load_best_model\",\n+ action=\"store_true\",\n+ help=\"Whether to load the best model at the end of training\",\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ action=\"store_true\",\n+ help=\"Whether to enable experiment trackers for logging.\",\n+ )\n+ parser.add_argument(\n+ \"--report_to\",\n+ type=str,\n+ default=\"all\",\n+ help=(\n+ 'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`,'\n+ ' `\"wandb\"` and `\"comet_ml\"`. 
Use `\"all\"` (default) to report to all integrations.'\n+ \"Only applicable when `--with_tracking` is passed.\"\n+ ),\n+ )\n+ args = parser.parse_args()\n+\n+ # Sanity checks\n+ if args.dataset_name is None and args.train_file is None and args.validation_file is None:\n+ raise ValueError(\"Need either a dataset name or a training/validation file.\")\n+ else:\n+ if args.train_file is not None:\n+ extension = args.train_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, json or txt file.\"\n+ if args.validation_file is not None:\n+ extension = args.validation_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, json or txt file.\"\n+\n+ if args.push_to_hub:\n+ assert args.output_dir is not None, \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n+\n+ return args\n+\n+\n+# New Code #\n+def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step, **kwargs):\n+ \"\"\"Utility function for checkpointing model + optimizer dictionaries\n+ The main purpose for this is to be able to resume training from that instant again\n+ \"\"\"\n+ checkpoint_state_dict = {\n+ \"epoch\": epoch,\n+ \"last_global_step\": last_global_step,\n+ }\n+ # Add extra kwargs too\n+ checkpoint_state_dict.update(kwargs)\n+\n+ success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)\n+ status_msg = \"checkpointing: PATH={}, ckpt_id={}\".format(PATH, ckpt_id)", "from_author": false }, { "body": "As discussed offline this is a very big specific block. We should try to avoid this at all cost.", "diff_hunk": "@@ -0,0 +1,753 @@\n+#!/usr/bin/env python\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\n+on a text file or a dataset without using HuggingFace Trainer.\n+\n+Here is the full list of checkpoints on the hub that can be fine-tuned by this script:\n+https://huggingface.co/models?filter=text-generation\n+\"\"\"\n+# You can also adapt this script on your own causal language modeling task. 
Pointers for this are left as comments.\n+\n+import argparse\n+import json\n+import logging\n+import math\n+import os\n+import random\n+from itertools import chain\n+from pathlib import Path\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+import datasets\n+import transformers\n+from accelerate import Accelerator, DistributedType\n+from accelerate.logging import get_logger\n+from accelerate.utils import DummyOptim, DummyScheduler, set_seed\n+from datasets import load_dataset\n+from huggingface_hub import Repository\n+from tqdm.auto import tqdm\n+from transformers import (\n+ CONFIG_MAPPING,\n+ MODEL_MAPPING,\n+ AutoConfig,\n+ AutoModelForCausalLM,\n+ AutoTokenizer,\n+ SchedulerType,\n+ default_data_collator,\n+ get_scheduler,\n+)\n+from transformers.utils import get_full_repo_name\n+from transformers.utils.versions import require_version\n+\n+\n+logger = get_logger(__name__)\n+\n+require_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\")\n+\n+MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\n+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n+\n+\n+def parse_args():\n+ parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a causal language modeling task\")\n+ parser.add_argument(\n+ \"--dataset_name\",\n+ type=str,\n+ default=None,\n+ help=\"The name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--dataset_config_name\",\n+ type=str,\n+ default=None,\n+ help=\"The configuration name of the dataset to use (via the datasets library).\",\n+ )\n+ parser.add_argument(\n+ \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\n+ )\n+ parser.add_argument(\n+ \"--validation_split_percentage\",\n+ default=5,\n+ help=\"The percentage of the train set used as validation set in case there's no validation split\",\n+ )\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=False,\n+ )\n+ parser.add_argument(\n+ \"--config_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained config name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--tokenizer_name\",\n+ type=str,\n+ default=None,\n+ help=\"Pretrained tokenizer name or path if not the same as model_name\",\n+ )\n+ parser.add_argument(\n+ \"--use_slow_tokenizer\",\n+ action=\"store_true\",\n+ help=\"If passed, will use a slow tokenizer (not backed by the πŸ€— Tokenizers library).\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_train_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the training dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--per_device_eval_batch_size\",\n+ type=int,\n+ default=8,\n+ help=\"Batch size (per device) for the evaluation dataloader.\",\n+ )\n+ parser.add_argument(\n+ \"--learning_rate\",\n+ type=float,\n+ default=5e-5,\n+ help=\"Initial learning rate (after the potential warmup period) to use.\",\n+ )\n+ parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n+ parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n+ parser.add_argument(\n+ \"--max_train_steps\",\n+ type=int,\n+ default=None,\n+ 
help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n+ )\n+ parser.add_argument(\n+ \"--gradient_accumulation_steps\",\n+ type=int,\n+ default=1,\n+ help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n+ )\n+ parser.add_argument(\n+ \"--lr_scheduler_type\",\n+ type=SchedulerType,\n+ default=\"linear\",\n+ help=\"The scheduler type to use.\",\n+ choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n+ )\n+ parser.add_argument(\n+ \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n+ )\n+ parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n+ parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n+ parser.add_argument(\n+ \"--model_type\",\n+ type=str,\n+ default=None,\n+ help=\"Model type to use if training from scratch.\",\n+ choices=MODEL_TYPES,\n+ )\n+ parser.add_argument(\n+ \"--block_size\",\n+ type=int,\n+ default=None,\n+ help=(\n+ \"Optional input sequence length after tokenization. The training dataset will be truncated in block of\"\n+ \" this size for training. Default to the model max input length for single sentence inputs (take into\"\n+ \" account special tokens).\"\n+ ),\n+ )\n+ parser.add_argument(\n+ \"--preprocessing_num_workers\",\n+ type=int,\n+ default=None,\n+ help=\"The number of processes to use for the preprocessing.\",\n+ )\n+ parser.add_argument(\n+ \"--overwrite_cache\", type=bool, default=False, help=\"Overwrite the cached training and evaluation sets\"\n+ )\n+ parser.add_argument(\n+ \"--no_keep_linebreaks\", action=\"store_true\", help=\"Do not keep line breaks when using TXT files.\"\n+ )\n+ parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\n+ parser.add_argument(\n+ \"--hub_model_id\", type=str, help=\"The name of the repository to keep in sync with the local `output_dir`.\"\n+ )\n+ parser.add_argument(\"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ # New Code #\n+ # Whether to load the best model at the end of training\n+ parser.add_argument(\n+ \"--load_best_model\",\n+ action=\"store_true\",\n+ help=\"Whether to load the best model at the end of training\",\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ action=\"store_true\",\n+ help=\"Whether to enable experiment trackers for logging.\",\n+ )\n+ parser.add_argument(\n+ \"--report_to\",\n+ type=str,\n+ default=\"all\",\n+ help=(\n+ 'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`,'\n+ ' `\"wandb\"` and `\"comet_ml\"`. 
Use `\"all\"` (default) to report to all integrations.'\n+ \"Only applicable when `--with_tracking` is passed.\"\n+ ),\n+ )\n+ args = parser.parse_args()\n+\n+ # Sanity checks\n+ if args.dataset_name is None and args.train_file is None and args.validation_file is None:\n+ raise ValueError(\"Need either a dataset name or a training/validation file.\")\n+ else:\n+ if args.train_file is not None:\n+ extension = args.train_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, json or txt file.\"\n+ if args.validation_file is not None:\n+ extension = args.validation_file.split(\".\")[-1]\n+ assert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, json or txt file.\"\n+\n+ if args.push_to_hub:\n+ assert args.output_dir is not None, \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n+\n+ return args\n+\n+\n+# New Code #\n+def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step, **kwargs):\n+ \"\"\"Utility function for checkpointing model + optimizer dictionaries\n+ The main purpose for this is to be able to resume training from that instant again\n+ \"\"\"\n+ checkpoint_state_dict = {\n+ \"epoch\": epoch,\n+ \"last_global_step\": last_global_step,\n+ }\n+ # Add extra kwargs too\n+ checkpoint_state_dict.update(kwargs)\n+\n+ success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)\n+ status_msg = \"checkpointing: PATH={}, ckpt_id={}\".format(PATH, ckpt_id)\n+ if success:\n+ logging.info(f\"Success {status_msg}\")\n+ else:\n+ logging.warning(f\"Failure {status_msg}\")\n+ return\n+\n+\n+# New Code #\n+def load_training_checkpoint(model, load_dir, tag=None, **kwargs):\n+ \"\"\"Utility function for checkpointing model + optimizer dictionaries\n+ The main purpose for this is to be able to resume training from that instant again\n+ \"\"\"\n+ _, checkpoint_state_dict = model.load_checkpoint(load_dir, tag=tag, **kwargs)\n+ epoch = checkpoint_state_dict[\"epoch\"]\n+ last_global_step = checkpoint_state_dict[\"last_global_step\"]\n+ del checkpoint_state_dict\n+ return (epoch, last_global_step)\n+\n+\n+# New Code #\n+def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):\n+ model.eval()\n+ losses = []\n+ for step, batch in enumerate(eval_dataloader):\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+\n+ loss = outputs.loss\n+ losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\n+\n+ losses = torch.cat(losses)\n+ losses = losses[: len(eval_dataset)]\n+ try:\n+ eval_loss = torch.mean(losses)\n+ perplexity = math.exp(eval_loss)\n+ except OverflowError:\n+ perplexity = float(\"inf\")\n+ return perplexity, eval_loss\n+\n+\n+def main():\n+ args = parse_args()\n+\n+ # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n+ # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers\n+ # in the environment\n+ accelerator = (\n+ Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator()\n+ )\n+ # Make one log on every process with the configuration for debugging.\n+ logging.basicConfig(\n+ format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n+ datefmt=\"%m/%d/%Y %H:%M:%S\",\n+ level=logging.INFO,\n+ )\n+ logger.info(accelerator.state, main_process_only=False)\n+ if accelerator.is_local_main_process:\n+ datasets.utils.logging.set_verbosity_warning()\n+ transformers.utils.logging.set_verbosity_info()\n+ else:\n+ datasets.utils.logging.set_verbosity_error()\n+ transformers.utils.logging.set_verbosity_error()\n+\n+ # If passed along, set the training seed now.\n+ if args.seed is not None:\n+ set_seed(args.seed)\n+\n+ # Handle the repository creation\n+ if accelerator.is_main_process:\n+ if args.push_to_hub:\n+ if args.hub_model_id is None:\n+ repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)\n+ else:\n+ repo_name = args.hub_model_id\n+ repo = Repository(args.output_dir, clone_from=repo_name)\n+\n+ with open(os.path.join(args.output_dir, \".gitignore\"), \"w+\") as gitignore:\n+ if \"step_*\" not in gitignore:\n+ gitignore.write(\"step_*\\n\")\n+ if \"epoch_*\" not in gitignore:\n+ gitignore.write(\"epoch_*\\n\")\n+ elif args.output_dir is not None:\n+ os.makedirs(args.output_dir, exist_ok=True)\n+ accelerator.wait_for_everyone()\n+\n+ # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n+ # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n+ # (the dataset will be downloaded automatically from the datasets Hub).\n+ #\n+ # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n+ # 'text' is found. 
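The setup above follows a common pattern: seed everything, create the output directory only on the main process, then hit a barrier so every process sees it. A condensed sketch under that assumption (the directory name is illustrative and stands in for `args.output_dir`):

```python
import os

from accelerate import Accelerator
from accelerate.utils import set_seed

accelerator = Accelerator()
set_seed(42)  # make runs reproducible across processes

output_dir = "clm_output"  # illustrative path
if accelerator.is_main_process:
    os.makedirs(output_dir, exist_ok=True)
accelerator.wait_for_everyone()  # ensure the folder exists before any process uses it
```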
You can easily tweak this behavior (see below).\n+ #\n+ # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n+ # download the dataset.\n+ if args.dataset_name is not None:\n+ # Downloading and loading a dataset from the hub.\n+ raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)\n+ if \"validation\" not in raw_datasets.keys():\n+ raw_datasets[\"validation\"] = load_dataset(\n+ args.dataset_name,\n+ args.dataset_config_name,\n+ split=f\"train[:{args.validation_split_percentage}%]\",\n+ )\n+ raw_datasets[\"train\"] = load_dataset(\n+ args.dataset_name,\n+ args.dataset_config_name,\n+ split=f\"train[{args.validation_split_percentage}%:]\",\n+ )\n+ else:\n+ data_files = {}\n+ dataset_args = {}\n+ if args.train_file is not None:\n+ data_files[\"train\"] = args.train_file\n+ if args.validation_file is not None:\n+ data_files[\"validation\"] = args.validation_file\n+ extension = args.train_file.split(\".\")[-1]\n+ if extension == \"txt\":\n+ extension = \"text\"\n+ dataset_args[\"keep_linebreaks\"] = not args.no_keep_linebreaks\n+ raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)\n+ # If no validation data is there, validation_split_percentage will be used to divide the dataset.\n+ if \"validation\" not in raw_datasets.keys():\n+ raw_datasets[\"validation\"] = load_dataset(\n+ extension,\n+ data_files=data_files,\n+ split=f\"train[:{args.validation_split_percentage}%]\",\n+ **dataset_args,\n+ )\n+ raw_datasets[\"train\"] = load_dataset(\n+ extension,\n+ data_files=data_files,\n+ split=f\"train[{args.validation_split_percentage}%:]\",\n+ **dataset_args,\n+ )\n+\n+ # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n+ # https://huggingface.co/docs/datasets/loading_datasets.html.\n+\n+ # Load pretrained model and tokenizer\n+ #\n+ # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n+ # download model & vocab.\n+ if args.config_name:\n+ config = AutoConfig.from_pretrained(args.config_name)\n+ elif args.model_name_or_path:\n+ config = AutoConfig.from_pretrained(args.model_name_or_path)\n+ else:\n+ config = CONFIG_MAPPING[args.model_type]()\n+ logger.warning(\"You are instantiating a new config instance from scratch.\")\n+\n+ if args.tokenizer_name:\n+ tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)\n+ elif args.model_name_or_path:\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)\n+ else:\n+ raise ValueError(\n+ \"You are instantiating a new tokenizer from scratch. 
This is not supported by this script.\"\n+ \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n+ )\n+\n+ if args.model_name_or_path:\n+ model = AutoModelForCausalLM.from_pretrained(\n+ args.model_name_or_path,\n+ from_tf=bool(\".ckpt\" in args.model_name_or_path),\n+ config=config,\n+ )\n+ else:\n+ logger.info(\"Training new model from scratch\")\n+ model = AutoModelForCausalLM.from_config(config)\n+\n+ model.resize_token_embeddings(len(tokenizer))\n+\n+ # Preprocessing the datasets.\n+ # First we tokenize all the texts.\n+ column_names = raw_datasets[\"train\"].column_names\n+ text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n+\n+ def tokenize_function(examples):\n+ return tokenizer(examples[text_column_name])\n+\n+ with accelerator.main_process_first():\n+ tokenized_datasets = raw_datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ num_proc=args.preprocessing_num_workers,\n+ remove_columns=column_names,\n+ load_from_cache_file=not args.overwrite_cache,\n+ desc=\"Running tokenizer on dataset\",\n+ )\n+\n+ if args.block_size is None:\n+ block_size = tokenizer.model_max_length\n+ if block_size > 1024:\n+ logger.warning(\n+ f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n+ \"Picking 1024 instead. You can change that default value by passing --block_size xxx.\"\n+ )\n+ block_size = 1024\n+ else:\n+ if args.block_size > tokenizer.model_max_length:\n+ logger.warning(\n+ f\"The block_size passed ({args.block_size}) is larger than the maximum length for the model\"\n+ f\"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.\"\n+ )\n+ block_size = min(args.block_size, tokenizer.model_max_length)\n+\n+ # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.\n+ def group_texts(examples):\n+ # Concatenate all texts.\n+ concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\n+ total_length = len(concatenated_examples[list(examples.keys())[0]])\n+ # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can\n+ # customize this part to your needs.\n+ if total_length >= block_size:\n+ total_length = (total_length // block_size) * block_size\n+ # Split by chunks of max_len.\n+ result = {\n+ k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\n+ for k, t in concatenated_examples.items()\n+ }\n+ result[\"labels\"] = result[\"input_ids\"].copy()\n+ return result\n+\n+ # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder\n+ # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower\n+ # to preprocess.\n+ #\n+ # To speed up this part, we use multiprocessing. 
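To see what the `group_texts` function above does, here is a standalone sketch on toy token ids (no tokenizer or dataset needed); the remainder that does not fill a full block is dropped, exactly as in the function:

```python
from itertools import chain

block_size = 4  # tiny block size for illustration
examples = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8], [9, 10]]}  # toy tokenized batch

# Concatenate all texts, then split into chunks of `block_size`.
concatenated = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated[list(examples.keys())[0]])
total_length = (total_length // block_size) * block_size  # drop the small remainder
result = {
    k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
    for k, t in concatenated.items()
}
result["labels"] = result["input_ids"].copy()
print(result["input_ids"])  # [[1, 2, 3, 4], [5, 6, 7, 8]] -- tokens 9 and 10 are dropped
```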
See the documentation of the map method for more information:\n+ # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map\n+\n+ with accelerator.main_process_first():\n+ lm_datasets = tokenized_datasets.map(\n+ group_texts,\n+ batched=True,\n+ num_proc=args.preprocessing_num_workers,\n+ load_from_cache_file=not args.overwrite_cache,\n+ desc=f\"Grouping texts in chunks of {block_size}\",\n+ )\n+\n+ train_dataset = lm_datasets[\"train\"]\n+ eval_dataset = lm_datasets[\"validation\"]\n+\n+ # Log a few random samples from the training set:\n+ for index in random.sample(range(len(train_dataset)), 3):\n+ logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n+\n+ # DataLoaders creation:\n+ train_dataloader = DataLoader(\n+ train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size\n+ )\n+\n+ # Optimizer\n+ # Split weights in two groups, one with weight decay and the other not.\n+ no_decay = [\"bias\", \"LayerNorm.weight\"]\n+ optimizer_grouped_parameters = [\n+ {\n+ \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n+ \"weight_decay\": args.weight_decay,\n+ },\n+ {\n+ \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n+ \"weight_decay\": 0.0,\n+ },\n+ ]\n+ # New Code #\n+ # Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer\n+ if \"optimizer\" not in accelerator.state.deepspeed_plugin.deepspeed_config:\n+ optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n+ else:\n+ optimizer = DummyOptim(param_groups=optimizer_grouped_parameters)\n+\n+ # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ model.tie_weights()\n+\n+ # Scheduler and math around the number of training steps.\n+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n+ if args.max_train_steps is None:\n+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n+ else:\n+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n+\n+ # New Code #\n+ # Creates Dummy Scheduler if `scheduler` was spcified in the config file else creates `args.lr_scheduler_type` Scheduler\n+ if \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config:\n+ lr_scheduler = get_scheduler(\n+ name=args.lr_scheduler_type,\n+ optimizer=optimizer,\n+ num_warmup_steps=args.num_warmup_steps,\n+ num_training_steps=args.max_train_steps,\n+ )\n+ else:\n+ lr_scheduler = DummyScheduler()\n+\n+ # New Code #\n+ # Processing all the fields with `auto` values in the DeepSpeed Config file\n+ if accelerator.state.deepspeed_plugin.config_file != \"none\":", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -75,22 +51,65 @@ class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):\n The optimizer to wrap.\n \"\"\"\n \n- def __init__(self, optimizer, model: DeepSpeedEngineWrapper):\n+ def __init__(self, optimizer):\n super().__init__(optimizer, device_placement=False, scaler=None)\n \n- self.model = model\n-\n def zero_grad(self, set_to_none=None):\n- pass # `model.step()` is doing that automatically. 
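In the optimizer setup shown above, parameters are split into a decay and a no-decay group before either `torch.optim.AdamW` or the `DummyOptim` placeholder is built (the dummy is used when the DeepSpeed config file already specifies an optimizer). A toy, self-contained illustration of the grouping, with module names chosen so the `no_decay` patterns match:

```python
from collections import OrderedDict

import torch

model = torch.nn.Sequential(
    OrderedDict([("linear", torch.nn.Linear(4, 4)), ("LayerNorm", torch.nn.LayerNorm(4))])
)

no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,  # stand-in for args.weight_decay
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,  # biases and LayerNorm weights are not decayed
    },
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=5e-4)
print([len(g["params"]) for g in optimizer.param_groups])  # [1, 3]
```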
Therefore, it's implementation is not needed\n+ pass # `accelerator.backward(loss)` is doing that automatically. Therefore, it's implementation is not needed\n \n def step(self):\n- \"\"\"This will handle optimizer.step() & optimizer.zero_grad() with gradient_accumulation\"\"\"\n- self.model.step()\n+ pass # `accelerator.backward(loss)` is doing that automatically. Therefore, it's implementation is not needed\n \n @property\n- def is_overflow(self):\n+ def step_was_skipped(self):\n \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n- overflow = False\n- if hasattr(self.optimizer, \"overflow\"):\n- overflow = self.optimizer.overflow\n- return overflow\n+ return self.optimizer.overflow\n+\n+\n+class DeepSpeedSchedulerWrapper(AcceleratedScheduler):\n+ \"\"\"\n+ Internal wrapper around a deepspeed scheduler.\n+\n+ Args:\n+ scheduler (`torch.optim.lr_scheduler.LambdaLR`):\n+ The scheduler to wrap.\n+ optimizers (one or a list of `torch.optim.Optimizer`):\n+ \"\"\"\n+\n+ def __init__(self, scheduler, optimizers):\n+ super().__init__(scheduler, optimizers)\n+\n+ def step(self):\n+ pass # `accelerator.backward(loss)` is doing that automatically. Therefore, it's implementation is not needed\n+\n+\n+class DummyOptim:\n+ \"\"\"\n+ Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training\n+ loop when optimizer config is specified in the deepspeed config file.\n+\n+ Args:\n+ params (list of `torch.Tensor`):\n+ List of model parameters.\n+ param_groups (list of dict):\n+ List of dictionaries with parameters and their groups.\n+ \"\"\"\n+\n+ def __init__(self, params=None, param_groups=None):\n+ self.param_groups = []\n+ if params is None and param_groups is None:\n+ raise ValueError(\"Either params or param_groups should be specified\")\n+ if param_groups is None:\n+ self.param_groups.append({\"params\": params})\n+ else:\n+ self.param_groups = param_groups\n+\n+\n+class DummyScheduler:", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. \n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. \n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . 
You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).\n+\n+Inference:\n+\n+1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but\n+ it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see:\n+ [deepspeed-zero-inference](#deepspeed-zero-inference).\n+\n+\n+## How it works?\n+\n+**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Insallation details](https://github.com/microsoft/DeepSpeed#installation)\n+for more information.\n+\n+We will first look at easy to use integration via `deepspeed_plugin`. \n+Followed by more flexible and feature rich `deepspeed config file` integration. \n+\n+### Accelerate DeepSpeed Plugin\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer no and answer the following questions. \n+This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Plugin:\n+\n+**ZeRO Stage-2 DeepSpeed Plugin Example**\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config:\n+ gradient_accumulation_steps: 1\n+ gradient_clipping: 1.0\n+ offload_optimizer_device: none\n+ offload_param_device: none\n+ zero3_init_flag: true\n+ zero_stage: 2\n+distributed_type: DEEPSPEED\n+fsdp_config: {}\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: fp16\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/by_feature/deepspeed_with_config_support.py \\", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -0,0 +1,542 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+Training advanced deep learning models is challenging. \n+Beyond model design, model scientists also need to set up the state-of-the-art training techniques such as \n+distributed training, mixed precision, gradient accumulation, and checkpointing. 
\n+Yet still, scientists may not achieve the desired system performance and convergence rate.\n+Large model sizes are even more challenging: a large model easily runs out of memory with pure data parallelism and it is difficult to use model parallelism. \n+[DeepSpeed](https://www.deepspeed.ai/) addresses these challenges to accelerate model development and training.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 with ZeRO-Infinity (CPU and NVME offload).", "from_author": true }, { "body": "You should host the image in the `huggingface/documentation-images` dataset as we try to limit the size of the repo. Also those won't be copied when building the doc (the two images in this imgs folder are not used in the documentation, just the main README).", "diff_hunk": "@@ -0,0 +1,508 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Currently it provides full support for:\n+\n+1. Optimizer state partitioning (ZeRO stage 1)\n+2. Gradient partitioning (ZeRO stage 2)\n+3. Parameter partitioning (ZeRO stage 3)\n+4. Custom mixed precision training handling\n+5. A range of fast CUDA-extension-based optimizers\n+6. ZeRO-Offload to CPU and Disk/NVMe\n+\n+ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU\n+Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).\n+\n+DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.\n+\n+DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which\n+won't be possible on a single GPU.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. 
Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 as well as CPU/Disk offload of optimizer states, gradients and parameters. \n+Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer along with diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)\n+![ZeRO Data Parallelism](./imgs/parallelism-zero.png)", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -0,0 +1,508 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# DeepSpeed \n+\n+[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Currently it provides full support for:\n+\n+1. Optimizer state partitioning (ZeRO stage 1)\n+2. Gradient partitioning (ZeRO stage 2)\n+3. Parameter partitioning (ZeRO stage 3)\n+4. Custom mixed precision training handling\n+5. A range of fast CUDA-extension-based optimizers\n+6. ZeRO-Offload to CPU and Disk/NVMe\n+\n+ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU\n+Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).\n+\n+DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.\n+\n+DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which\n+won't be possible on a single GPU.\n+\n+πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:\n+\n+1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of\n+ this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. \n+ User may have to change few lines of code depending on the config.\n+2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. \n+ User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed.\n+\n+## What is integrated?\n+\n+Training:\n+\n+1. 
DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 as well as CPU/Disk offload of optimizer states, gradients and parameters. \n+Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer along with diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)\n+![ZeRO Data Parallelism](./imgs/parallelism-zero.png)", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/405", "pr_id": 949353749 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex 5020c6cc4..4613e1e8d 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -14,7 +14,7 @@ jobs:\n - name: Install Python dependencies\n run: pip install setuptools==59.5.0; pip install -e .[test,test_trackers]\n - name: Run Tests\n- run: make test\n+ run: make test_cpu\n \n test_examples:\n runs-on: ubuntu-latest\n@@ -27,4 +27,4 @@ jobs:\n - name: Install Python dependencies\n run: pip install setuptools==59.5.0; pip install -e .[test] tensorboard\n - name: Run Tests\n- run: make test_examples\n\\ No newline at end of file\n+ run: make test_examples\ndiff --git a/Makefile b/Makefile\nindex db7893565..d2822153b 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -24,8 +24,12 @@ style:\n \tpython utils/style_doc.py src/accelerate docs/source --max_len 119\n \t\n # Run tests for the library\n-test:\n-\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/ --ignore=./tests/test_examples.py\n+test_cpu:\n+\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py\n+\n+test_cuda:\n+\tpython -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/test_scheduler.py --ignore=./tests/test_cpu.py\n+\tpython -m pytest -s -v ./tests/test_cpu.py ./tests/test_scheduler.py\n \n test_examples:\n-\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/test_examples.py\n+\tpython -m pytest -s -v ./tests/test_examples.py\ndiff --git a/tests/test_cpu.py b/tests/test_cpu.py\nindex 72a8b2e7d..63ee69a97 100644\n--- a/tests/test_cpu.py\n+++ b/tests/test_cpu.py\n@@ -18,6 +18,6 @@\n from accelerate.test_utils import test_script\n \n \n-class MultiTPUTester(unittest.TestCase):\n+class MultiCPUTester(unittest.TestCase):\n def test_cpu(self):\n debug_launcher(test_script.main)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/401", "pr_id": 948342996 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 8a289442d..8304bcefe 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -301,7 +301,7 @@ def collate_fn(examples):\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss.item(),\n+ \"train_loss\": total_loss.item() / len(train_dataloader),\n },\n step=epoch,\n )\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 1ccc2c1b0..9538b21b1 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -222,7 +222,7 @@ def training_function(config, 
args):\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss.item(),\n+ \"train_loss\": total_loss.item() / len(train_dataloader),\n \"epoch\": epoch,\n },\n step=epoch,\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex fd830442d..bd2ed3b29 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -258,7 +258,12 @@ def training_function(config, args):\n accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n if args.with_tracking:\n accelerator.log(\n- {\"accuracy\": 100 * eval_metric, \"total_loss\": total_loss, \"epoch\": epoch}, step=overall_step\n+ {\n+ \"accuracy\": 100 * eval_metric,\n+ \"train_loss\": total_loss.item() / len(train_dataloader),\n+ \"epoch\": epoch,\n+ },\n+ step=overall_step,\n )\n if checkpointing_steps == \"epoch\":\n output_dir = f\"epoch_{epoch}\"\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex abf85c8d9..9c8b89706 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -246,7 +246,7 @@ def collate_fn(examples):\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss.item(),\n+ \"train_loss\": total_loss.item() / len(train_dataloader),\n \"epoch\": epoch,\n },\n step=epoch,\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex af892f4eb..6ec2a143a 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -100,13 +100,13 @@ def test_cv_examples(self):\n cv_path = os.path.abspath(os.path.join(\"examples\", \"cv_example.py\"))\n special_strings = [\n \" \" * 16 + \"{\\n\\n\",\n- \" \" * 18 + '\"accuracy\": eval_metric[\"accuracy\"],\\n\\n',\n- \" \" * 18 + '\"f1\": eval_metric[\"f1\"],\\n\\n',\n- \" \" * 18 + '\"train_loss\": total_loss.item(),\\n\\n',\n- \" \" * 18 + '\"epoch\": epoch,\\n\\n',\n+ \" \" * 20 + '\"accuracy\": eval_metric[\"accuracy\"],\\n\\n',\n+ \" \" * 20 + '\"f1\": eval_metric[\"f1\"],\\n\\n',\n+ \" \" * 20 + '\"train_loss\": total_loss.item() / len(train_dataloader),\\n\\n',\n+ \" \" * 20 + '\"epoch\": epoch,\\n\\n',\n \" \" * 16 + \"},\\n\\n\",\n \" \" * 16 + \"step=epoch,\\n\",\n- \" \" * 8,\n+ \" \" * 12,\n ]\n self.one_complete_example(\"complete_cv_example.py\", True, cv_path, special_strings)\n self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@pacman100 @sgugger In a multi-gpu environment, does the **total_loss** here represent the loss of all training data in the entire epoch or just the loss of training data on the main process? \r\n\r\nI tried to print the result of total_loss on each process, and it seems that accelerator.log() only records the total_loss on the main process. So, please, do I need to use accelerator.gather() to get the training loss on the whole epoch? I hope I can get your confirmation, many thanks!", "from_author": false }, { "body": "Hello @benjpau , yes, you are correct, you would need to `gather` to get the total epoch loss. 
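Picking up the point in the reply above about needing `gather`: a hedged sketch of averaging the per-process running loss across processes before logging it. The numbers stand in for a real accumulated loss and dataloader length, and the snippet runs unchanged on a single process:

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator()
total_loss = torch.tensor([2.5], device=accelerator.device)  # per-process loss sum (made up)
num_batches = 10                                             # stands in for len(train_dataloader)

# Average the loss over all processes, then normalize by the number of batches.
epoch_loss = accelerator.gather(total_loss).mean() / num_batches
if accelerator.is_main_process:
    print(f"train_loss: {epoch_loss.item():.4f}")
```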
Will raise a PR to fix the examples to reflect the same.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/399", "pr_id": 947926044 }, { "diff": "diff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nindex 660bc521a..a562e9283 100644\n--- a/src/accelerate/utils/offload.py\n+++ b/src/accelerate/utils/offload.py\n@@ -21,44 +21,13 @@\n import torch\n \n \n-def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):\n- \"\"\"\n- Offload a state dict in a given folder.\n-\n- Args:\n- save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict.\n- state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload.\n- \"\"\"\n- os.makedirs(save_dir, exist_ok=True)\n- index = {}\n- for name, parameter in state_dict.items():\n- tensor_file = os.path.join(save_dir, f\"{name}.dat\")\n- array = parameter.numpy()\n- index[name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n- if array.ndim == 0:\n- array = array[None]\n- file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n- file_array[:] = array[:]\n- file_array.flush()\n-\n- # Update index\n- index_file = os.path.join(save_dir, \"index.json\")\n- if os.path.isfile(index_file):\n- with open(index_file, \"r\", encoding=\"utf-8\") as f:\n- current_index = json.load(f)\n- else:\n- current_index = {}\n- current_index.update(index)\n-\n- with open(index_file, \"w\", encoding=\"utf-8\") as f:\n- json.dump(current_index, f, indent=2)\n-\n-\n def offload_weight(weight, weight_name, offload_folder, index=None):\n array = weight.numpy()\n tensor_file = os.path.join(offload_folder, f\"{weight_name}.dat\")\n if index is not None:\n index[weight_name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n+ if array.ndim == 0:\n+ array = array[None]\n file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n file_array[:] = array[:]\n file_array.flush()\n@@ -82,6 +51,23 @@ def save_offload_index(index, offload_folder):\n json.dump(current_index, f, indent=2)\n \n \n+def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):\n+ \"\"\"\n+ Offload a state dict in a given folder.\n+\n+ Args:\n+ save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict.\n+ state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload.\n+ \"\"\"\n+ os.makedirs(save_dir, exist_ok=True)\n+ index = {}\n+ for name, parameter in state_dict.items():\n+ index = offload_weight(parameter, name, save_dir, index=index)\n+\n+ # Update index\n+ save_offload_index(index, save_dir)\n+\n+\n class PrefixedDataset(Mapping):\n \"\"\"\n Will access keys in a given dataset by adding a prefix.\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/398", "pr_id": 947471025 }, { "diff": "diff --git a/README.md b/README.md\nindex a3fc74cef..44b25778e 100644\n--- a/README.md\n+++ b/README.md\n@@ -212,6 +212,7 @@ If you like the simplicity of πŸ€— Accelerate but would prefer a higher-level ab\n \n * [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. 
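A standalone sketch of the memmap-based `offload_weight` pattern refactored in the diff above: the tensor is written to a `.dat` file on disk and its dtype and shape are recorded in an index. The folder and parameter names here are illustrative:

```python
import os

import numpy as np
import torch

offload_folder = "offload_demo"  # illustrative scratch directory
os.makedirs(offload_folder, exist_ok=True)

weight = torch.randn(3, 4)
array = weight.numpy()
index = {"linear.weight": {"dtype": str(array.dtype), "shape": list(array.shape)}}

# Write the tensor into a memory-mapped file so it can be reloaded lazily later.
file_array = np.memmap(
    os.path.join(offload_folder, "linear.weight.dat"),
    dtype=array.dtype,
    mode="w+",
    shape=array.shape,
)
file_array[:] = array[:]
file_array.flush()
print(index)  # {'linear.weight': {'dtype': 'float32', 'shape': [3, 4]}}
```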
Animus highlights common \"breakpoints\" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).\n * [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model train, and inference logic.\n+* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.\n * [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.\n * [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centred around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/396", "pr_id": 947245955 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex cb904a9b5..6dc1f847c 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -21,8 +21,6 @@\n \n import torch\n \n-from packaging import version\n-\n from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state\n from .data_loader import prepare_data_loader\n from .logging import get_logger\n@@ -46,6 +44,7 @@\n gather,\n get_pretty_name,\n is_deepspeed_available,\n+ is_torch_version,\n pad_across_processes,\n reduce,\n save,\n@@ -173,7 +172,7 @@ def __init__(\n os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\n \n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n- if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n+ if is_torch_version(\"<\", \"1.12.0.dev20220418+cu113\"):\n raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")\n \n # Kwargs handlers\n@@ -212,7 +211,7 @@ def __init__(\n self.device_placement = device_placement\n self.split_batches = split_batches\n self.dispatch_batches = dispatch_batches\n- if dispatch_batches is True and version.parse(torch.__version__) < version.parse(\"1.8.0\"):\n+ if dispatch_batches is True and is_torch_version(\"<\", \"1.8.0\"):\n raise ImportError(\n \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. 
You have {torch.__version__}.\"\n )\n@@ -222,15 +221,15 @@ def __init__(\n self.scaler = None\n self.native_amp = False\n if self.state.mixed_precision == \"fp16\":\n- self.native_amp = version.parse(torch.__version__) >= version.parse(\"1.6\")\n- if version.parse(torch.__version__) < version.parse(\"1.6\"):\n+ self.native_amp = is_torch_version(\">=\", \"1.6\")\n+ if not self.native_amp:\n raise ValueError(\"fp16 mixed precision requires PyTorch >= 1.6\")\n \n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n elif self.state.mixed_precision == \"bf16\":\n- self.native_amp = version.parse(torch.__version__) >= version.parse(\"1.10\")\n- if mixed_precision == \"bf16\" and version.parse(torch.__version__) < version.parse(\"1.10\"):\n+ self.native_amp = is_torch_version(\">=\", \"1.10\")\n+ if mixed_precision == \"bf16\" and not self.native_amp:\n raise ValueError(\"bf16 mixed precision requires PyTorch >= 1.10\")\n \n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n@@ -245,7 +244,7 @@ def __init__(\n # RNG Types\n self.rng_types = rng_types\n if self.rng_types is None:\n- self.rng_types = [\"torch\"] if version.parse(torch.__version__) <= version.parse(\"1.5.1\") else [\"generator\"]\n+ self.rng_types = [\"torch\"] if is_torch_version(\"<=\", \"1.5.1\") else [\"generator\"]\n \n @property\n def distributed_type(self):\n@@ -486,7 +485,7 @@ def prepare_model(self, model):\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n- if self.mixed_precision == \"fp16\" and version.parse(torch.__version__) >= version.parse(\"1.10\"):\n+ if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n elif self.mixed_precision == \"bf16\":\n model.forward = torch.cuda.amp.autocast(dtype=torch.bfloat16)(model.forward)\n@@ -924,7 +923,7 @@ def autocast(self):\n different will happen otherwise.\n \"\"\"\n if self.native_amp:\n- if self.mixed_precision == \"fp16\" and version.parse(torch.__version__) >= version.parse(\"1.10\"):\n+ if self.mixed_precision == \"fp16\" and is_torch_version(\">=\", \"1.10\"):\n autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\n elif self.mixed_precision == \"bf16\":\n autocast_context = torch.cuda.amp.autocast(dtype=torch.bfloat16)\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex ff4522162..22c8e621c 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -33,8 +33,7 @@\n PrepareForLaunch,\n is_sagemaker_available,\n )\n-from accelerate.utils.versions import torch_version\n-from packaging import version\n+from accelerate.utils.versions import is_torch_version\n \n \n def launch_command_parser(subparsers=None):\n@@ -219,9 +218,9 @@ def simple_launcher(args):\n \n \n def multi_gpu_launcher(args):\n- if torch_version >= version.parse(\"1.10.0\"):\n+ if is_torch_version(\">=\", \"1.10.0\"):\n cmd = [\"torchrun\"]\n- elif torch_version >= version.parse(\"1.9.0\"):\n+ elif is_torch_version(\">=\", \"1.9.0\"):\n cmd = [sys.executable, \"-m\", \"torch.distributed.run\"]\n else:\n cmd = [sys.executable, \"-m\", \"torch.distributed.launch\", \"--use_env\"]\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 50aeb57b6..79ba6b3f3 
100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -18,8 +18,6 @@\n import torch\n from torch.utils.data import BatchSampler, DataLoader, IterableDataset\n \n-from packaging import version\n-\n from .state import AcceleratorState, DistributedType, is_tpu_available\n from .utils import (\n RNGType,\n@@ -29,6 +27,7 @@\n find_batch_size,\n get_data_structure,\n initialize_tensors,\n+ is_torch_version,\n send_to_device,\n slice_tensors,\n synchronize_rng_states,\n@@ -61,7 +60,7 @@\n }\n \n for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():\n- if version.parse(torch.__version__) >= version.parse(v):\n+ if is_torch_version(\">=\", v):\n _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)\n \n \n@@ -327,7 +326,7 @@ class DataLoaderDispatcher(DataLoader):\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n shuffle = False\n- if version.parse(torch.__version__) >= version.parse(\"1.11.0\"):\n+ if is_torch_version(\">=\", \"1.11.0\"):\n from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe\n \n # We need to save the shuffling state of the DataPipe\n@@ -335,7 +334,7 @@ def __init__(self, dataset, split_batches: bool = False, **kwargs):\n shuffle = dataset._shuffle_enabled\n super().__init__(dataset, **kwargs)\n self.split_batches = split_batches\n- if version.parse(torch.__version__) < version.parse(\"1.8.0\"):\n+ if is_torch_version(\"<\", \"1.8.0\"):\n raise ImportError(\n \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\n )\n@@ -487,7 +486,7 @@ def prepare_data_loader(\n \n </Tip>\"\"\"\n if dispatch_batches is None:\n- if version.parse(torch.__version__) < version.parse(\"1.8.0\") or not put_on_device:\n+ if is_torch_version(\"<\", \"1.8.0\") or not put_on_device:\n dispatch_batches = False\n else:\n dispatch_batches = isinstance(dataloader.dataset, IterableDataset)\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex d3cd2e1a2..298db439b 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -19,10 +19,8 @@\n \n import torch\n \n-from packaging import version\n-\n from .state import AcceleratorState\n-from .utils import PrecisionType, PrepareForLaunch, patch_environment\n+from .utils import PrecisionType, PrepareForLaunch, is_torch_version, patch_environment\n \n \n def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\n@@ -85,7 +83,7 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n \n if num_processes > 1:\n # Multi-GPU launch\n- if version.parse(torch.__version__) < version.parse(\"1.5.0\"):\n+ if is_torch_version(\"<\", \"1.5.0\"):\n raise ImportError(\n \"Using `notebook_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\n f\"{torch.__version__}.\"\n@@ -156,7 +154,7 @@ def debug_launcher(function, args=(), num_processes=2):\n num_processes (`int`, *optional*, defaults to 2):\n The number of processes to use for training.\n \"\"\"\n- if version.parse(torch.__version__) < version.parse(\"1.5.0\"):\n+ if is_torch_version(\"<\", \"1.5.0\"):\n raise ImportError(\n \"Using `debug_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\n f\"{torch.__version__}.\"\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex dcf792592..3d5977968 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -17,10 +17,8 
@@\n \n import torch\n \n-from packaging import version\n-\n from .state import AcceleratorState\n-from .utils import DistributedType, honor_type, is_tpu_available\n+from .utils import DistributedType, honor_type, is_torch_version, is_tpu_available\n \n \n if is_tpu_available():\n@@ -103,7 +101,7 @@ def state_dict(self):\n return self.optimizer.state_dict()\n \n def zero_grad(self, set_to_none=None):\n- if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n+ if is_torch_version(\"<\", \"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\ndiff --git a/src/accelerate/test_utils/test_script.py b/src/accelerate/test_utils/test_script.py\nindex 48e7bcc49..a23179bd7 100644\n--- a/src/accelerate/test_utils/test_script.py\n+++ b/src/accelerate/test_utils/test_script.py\n@@ -21,8 +21,7 @@\n from accelerate.data_loader import prepare_data_loader\n from accelerate.state import AcceleratorState\n from accelerate.test_utils import RegressionDataset, RegressionModel, are_the_same_tensors\n-from accelerate.utils import DistributedType, gather, set_seed, synchronize_rng_states\n-from packaging import version\n+from accelerate.utils import DistributedType, gather, is_torch_version, set_seed, synchronize_rng_states\n \n \n def init_state_check():\n@@ -40,7 +39,7 @@ def rng_sync_check():\n if state.distributed_type == DistributedType.MULTI_GPU:\n synchronize_rng_states([\"cuda\"])\n assert are_the_same_tensors(torch.cuda.get_rng_state()), \"RNG states improperly synchronized on GPU.\"\n- if version.parse(torch.__version__) >= version.parse(\"1.6.0\"):\n+ if is_torch_version(\">=\", \"1.6.0\"):\n generator = torch.Generator()\n synchronize_rng_states([\"generator\"], generator=generator)\n assert are_the_same_tensors(generator.get_state()), \"RNG states improperly synchronized in generator.\"\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 19d8b81d4..186f27bb7 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -73,6 +73,7 @@\n send_to_device,\n slice_tensors,\n )\n+from .versions import compare_versions, is_torch_version\n \n \n if is_deepspeed_available():\ndiff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\nindex c2c36a486..af8a6bf35 100644\n--- a/src/accelerate/utils/constants.py\n+++ b/src/accelerate/utils/constants.py\n@@ -12,8 +12,13 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import operator as op\n+\n+\n SCALER_NAME = \"scaler.pt\"\n MODEL_NAME = \"pytorch_model\"\n RNG_STATE_NAME = \"random_states\"\n OPTIMIZER_NAME = \"optimizer\"\n SCHEDULER_NAME = \"scheduler\"\n+\n+STR_OPERATION_TO_FUNC = {\">\": op.gt, \">=\": op.ge, \"==\": op.eq, \"!=\": op.ne, \"<=\": op.le, \"<\": op.lt}\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex 3fc9d969c..954bb5dfd 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -23,11 +23,10 @@\n import torch\n from torch.distributed import ReduceOp\n \n-from packaging import version\n-\n from ..state import AcceleratorState\n from .dataclasses import DistributedType, TensorInformation\n from .imports import is_tpu_available\n+from .versions import is_torch_version\n \n \n if is_tpu_available():\n@@ -459,8 +458,7 @@ def _convert_to_fp32(tensor):\n \n def _is_fp16_bf16_tensor(tensor):\n return 
hasattr(tensor, \"dtype\") and (\n- tensor.dtype == torch.float16\n- or (version.parse(torch.__version__) >= version.parse(\"1.10\") and tensor.dtype == torch.bfloat16)\n+ tensor.dtype == torch.float16 or (is_torch_version(\">=\", \"1.10\") and tensor.dtype == torch.bfloat16)\n )\n \n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\ndiff --git a/src/accelerate/utils/versions.py b/src/accelerate/utils/versions.py\nindex 36dd7ac48..38674d4b3 100644\n--- a/src/accelerate/utils/versions.py\n+++ b/src/accelerate/utils/versions.py\n@@ -13,8 +13,11 @@\n # limitations under the License.\n \n import sys\n+from typing import Union\n \n-from packaging.version import parse\n+from packaging.version import Version, parse\n+\n+from .constants import STR_OPERATION_TO_FUNC\n \n \n if sys.version_info < (3, 8):\n@@ -23,3 +26,36 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"\n+ Compares a library version to some requirement using a given operation.\n+\n+ Args:\n+ library_or_version (`str` or `packaging.version.Version`):\n+ A library name or a version to check.\n+ operation (`str`):\n+ A string representation of an operator, such as `\">\"` or `\"<=\"`.\n+ requirement_version (`str`):\n+ The version to compare the library version against\n+ \"\"\"\n+ if operation not in STR_OPERATION_TO_FUNC.keys():\n+ raise ValueError(f\"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}\")\n+ operation = STR_OPERATION_TO_FUNC[operation]\n+ if isinstance(library_or_version, str):\n+ library_or_version = parse(importlib_metadata.version(library_or_version))\n+ return operation(library_or_version, parse(requirement_version))\n+\n+\n+def is_torch_version(operation: str, version: str):\n+ \"\"\"\n+ Compares the current PyTorch version to a given reference with an operation.\n+\n+ Args:\n+ operation (`str`):\n+ A string representation of an operator, such as `\">\"` or `\"<=\"`\n+ version (`str`):\n+ A string version of PyTorch\n+ \"\"\"\n+ return compare_versions(torch_version, operation, version)\n", "code_comments": [ { "body": "Not super fan of the name, wdyt of `torch_version_is(\"<\", \"1.12.0.dev20220418+cu113\")` or `is_torch_version(\"<\", \"1.12.0.dev20220418+cu113\")`?", "diff_hunk": "@@ -173,7 +172,7 @@ def __init__(\n os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\n \n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n- if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n+ if check_torch_version(\"<\", \"1.12.0.dev20220418+cu113\"):", "from_author": false }, { "body": "Meant to be `compare_version` here?", "diff_hunk": "@@ -23,3 +26,34 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"Compares `library_or_version` to `requirement_version` with an `operation`\n+\n+ Args:\n+ library_or_version (`str`, `packaging.version.Version]`):\n+ A library name or Version to check\n+ operation (`str`):\n+ A string representation of an operator, such as \">\" or \"<=\"\n+ requirement_version (`str`):\n+ The version to compare the library version against\n+ \"\"\"\n+ if operation not in STR_OPERATION_TO_FUNC.keys():\n+ raise 
ValueError(f\"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}\")\n+ operation = STR_OPERATION_TO_FUNC[operation]\n+ if isinstance(library_or_version, str):\n+ library_or_version = parse(importlib_metadata.version(library_or_version))\n+ return operation(library_or_version, parse(requirement_version))\n+\n+\n+def check_torch_version(operation: str, version: str):\n+ \"\"\"Compares the current PyTorch version to `version` with an `operation`\n+\n+ Args:\n+ operation (`str`):\n+ A string representation of an operator, such as \">\" or \"<=\"\n+ version (`str`):\n+ A string version of PyTorch\n+ \"\"\"\n+ return check_torch_version(torch_version, operation, version)", "from_author": false }, { "body": "Yup, nice catch! ", "diff_hunk": "@@ -23,3 +26,34 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"Compares `library_or_version` to `requirement_version` with an `operation`\n+\n+ Args:\n+ library_or_version (`str`, `packaging.version.Version]`):\n+ A library name or Version to check\n+ operation (`str`):\n+ A string representation of an operator, such as \">\" or \"<=\"\n+ requirement_version (`str`):\n+ The version to compare the library version against\n+ \"\"\"\n+ if operation not in STR_OPERATION_TO_FUNC.keys():\n+ raise ValueError(f\"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}\")\n+ operation = STR_OPERATION_TO_FUNC[operation]\n+ if isinstance(library_or_version, str):\n+ library_or_version = parse(importlib_metadata.version(library_or_version))\n+ return operation(library_or_version, parse(requirement_version))\n+\n+\n+def check_torch_version(operation: str, version: str):\n+ \"\"\"Compares the current PyTorch version to `version` with an `operation`\n+\n+ Args:\n+ operation (`str`):\n+ A string representation of an operator, such as \">\" or \"<=\"\n+ version (`str`):\n+ A string version of PyTorch\n+ \"\"\"\n+ return check_torch_version(torch_version, operation, version)", "from_author": true }, { "body": "`is_torch_version` sounds better, will go with that. 
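To summarize the helper being discussed in the hunks above (including the recursive call the reviewer caught), here is a hedged re-sketch of the operator-string version check; for brevity it reads the version straight from `torch.__version__` rather than `importlib.metadata`:

```python
import operator as op

import torch
from packaging.version import parse

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}


def is_torch_version(operation: str, version: str) -> bool:
    """Compare the installed PyTorch version against `version` using `operation`."""
    if operation not in STR_OPERATION_TO_FUNC:
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC)}, received {operation}")
    return STR_OPERATION_TO_FUNC[operation](parse(torch.__version__), parse(version))


print(is_torch_version(">=", "1.6.0"))  # True on any recent install
```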
", "diff_hunk": "@@ -173,7 +172,7 @@ def __init__(\n os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\n \n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n- if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n+ if check_torch_version(\"<\", \"1.12.0.dev20220418+cu113\"):", "from_author": true }, { "body": "```suggestion\r\n library_or_version (`str` or `packaging.version.Version`):\r\n```", "diff_hunk": "@@ -23,3 +26,34 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"Compares `library_or_version` to `requirement_version` with an `operation`\n+\n+ Args:\n+ library_or_version (`str`, `packaging.version.Version]`):", "from_author": false }, { "body": "```suggestion\r\n A library name or a version to check.\r\n```", "diff_hunk": "@@ -23,3 +26,34 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"Compares `library_or_version` to `requirement_version` with an `operation`\n+\n+ Args:\n+ library_or_version (`str`, `packaging.version.Version]`):\n+ A library name or Version to check", "from_author": false }, { "body": "```suggestion\r\n \"\"\"\r\n Compares a library version to some requirement using a given operation.\r\n```\r\nI prefer to have complete English sentence describe the function :-)", "diff_hunk": "@@ -23,3 +26,34 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"Compares `library_or_version` to `requirement_version` with an `operation`", "from_author": false }, { "body": "```suggestion\r\n A string representation of an operator, such as `\">\"` or `\"<=\"`.\r\n```", "diff_hunk": "@@ -23,3 +26,34 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"Compares `library_or_version` to `requirement_version` with an `operation`\n+\n+ Args:\n+ library_or_version (`str`, `packaging.version.Version]`):\n+ A library name or Version to check\n+ operation (`str`):\n+ A string representation of an operator, such as \">\" or \"<=\"", "from_author": false }, { "body": "```suggestion\r\n \"\"\"\r\n Compares the current PyTorch version to a given reference with an operation.\r\n```", "diff_hunk": "@@ -23,3 +26,34 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"Compares `library_or_version` to `requirement_version` with an `operation`\n+\n+ Args:\n+ library_or_version (`str`, `packaging.version.Version]`):\n+ A library name or Version to check\n+ operation (`str`):\n+ A string representation of an operator, such as \">\" or \"<=\"\n+ requirement_version (`str`):\n+ The version to compare the library version against\n+ \"\"\"\n+ if operation not in STR_OPERATION_TO_FUNC.keys():\n+ raise 
ValueError(f\"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}\")\n+ operation = STR_OPERATION_TO_FUNC[operation]\n+ if isinstance(library_or_version, str):\n+ library_or_version = parse(importlib_metadata.version(library_or_version))\n+ return operation(library_or_version, parse(requirement_version))\n+\n+\n+def is_torch_version(operation: str, version: str):\n+ \"\"\"Compares the current PyTorch version to `version` with an `operation`", "from_author": false }, { "body": "```suggestion\r\n A string representation of an operator, such as `\">\"` or `\"<=\"`\r\n```", "diff_hunk": "@@ -23,3 +26,34 @@\n import importlib.metadata as importlib_metadata\n \n torch_version = parse(importlib_metadata.version(\"torch\"))\n+\n+\n+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):\n+ \"\"\"Compares `library_or_version` to `requirement_version` with an `operation`\n+\n+ Args:\n+ library_or_version (`str`, `packaging.version.Version]`):\n+ A library name or Version to check\n+ operation (`str`):\n+ A string representation of an operator, such as \">\" or \"<=\"\n+ requirement_version (`str`):\n+ The version to compare the library version against\n+ \"\"\"\n+ if operation not in STR_OPERATION_TO_FUNC.keys():\n+ raise ValueError(f\"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}\")\n+ operation = STR_OPERATION_TO_FUNC[operation]\n+ if isinstance(library_or_version, str):\n+ library_or_version = parse(importlib_metadata.version(library_or_version))\n+ return operation(library_or_version, parse(requirement_version))\n+\n+\n+def is_torch_version(operation: str, version: str):\n+ \"\"\"Compares the current PyTorch version to `version` with an `operation`\n+\n+ Args:\n+ operation (`str`):\n+ A string representation of an operator, such as \">\" or \"<=\"", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/395", "pr_id": 947199933 }, { "diff": "diff --git a/setup.py b/setup.py\nindex 7d463cf88..dcfe3fd8e 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -55,7 +55,7 @@\n ]\n },\n python_requires=\">=3.6.0\",\n- install_requires=[\"torch>=1.4.0\", \"pyyaml\", \"numpy>=1.17\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n extras_require=extras,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n", "code_comments": [ { "body": "If we're alphabetically sorting, let's really do it ;-)", "diff_hunk": "@@ -55,7 +55,7 @@\n ]\n },\n python_requires=\">=3.6.0\",\n- install_requires=[\"torch>=1.4.0\", \"pyyaml\", \"numpy>=1.17\"],\n+ install_requires=[\"numpy>=1.17\", \"pyyaml\", \"torch>=1.4.0\", \"packaging>=20.0\"],", "from_author": false }, { "body": "Fixed, heh", "diff_hunk": "@@ -55,7 +55,7 @@\n ]\n },\n python_requires=\">=3.6.0\",\n- install_requires=[\"torch>=1.4.0\", \"pyyaml\", \"numpy>=1.17\"],\n+ install_requires=[\"numpy>=1.17\", \"pyyaml\", \"torch>=1.4.0\", \"packaging>=20.0\"],", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/394", "pr_id": 947123669 }, { "diff": "diff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\nindex 0a5bca96d..dea10ba50 100644\n--- 
a/src/accelerate/hooks.py\n+++ b/src/accelerate/hooks.py\n@@ -260,6 +260,24 @@ def detach_hook(self, module):\n set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))\n \n \n+def attach_execution_device_hook(module: torch.nn.Module, execution_device: Union[int, str, torch.device]):\n+ \"\"\"\n+ Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right\n+ execution device\n+\n+ Args:\n+ module (`torch.nn.Module`):\n+ The module where we want to attach the hooks.\n+ execution_device (`int`, `str` or `torch.device`):\n+ The device on which inputs and model weights should be placed before the forward pass.\n+ \"\"\"\n+ if not hasattr(module, \"_hf_hook\") and len(module.state_dict()) > 0:\n+ add_hook_to_module(module, AlignDevicesHook(execution_device))\n+\n+ for child in module.children():\n+ attach_execution_device_hook(child, execution_device)\n+\n+\n def attach_align_device_hook(\n module: torch.nn.Module,\n execution_device: Optional[torch.device] = None,\n@@ -383,6 +401,7 @@ def attach_align_device_hook_on_blocks(\n place_submodules=True,\n )\n add_hook_to_module(module, hook)\n+ attach_execution_device_hook(module, execution_device[module_name])\n elif module_name in execution_device:\n attach_align_device_hook(\n module,\n@@ -395,6 +414,7 @@ def attach_align_device_hook_on_blocks(\n if not hasattr(module, \"_hf_hook\"):\n hook = AlignDevicesHook(execution_device=execution_device[module_name], io_same_device=(module_name == \"\"))\n add_hook_to_module(module, hook)\n+ attach_execution_device_hook(module, execution_device[module_name])\n elif module_name == \"\":\n hook = AlignDevicesHook(io_same_device=True)\n add_hook_to_module(module, hook)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Not yet, will have a look before merging (though I don't expect much difference since there is not that many submodules).", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/392", "pr_id": 945966905 }, { "diff": "diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml\nnew file mode 100644\nindex 000000000..6b7987e51\n--- /dev/null\n+++ b/.github/workflows/build-docker-images.yml\n@@ -0,0 +1,53 @@\n+name: Build Docker images (scheduled)\n+\n+on:\n+ repository_dispatch:\n+ schedule:\n+ - cron: \"0 1 * * *\"\n+\n+concurrency:\n+ group: docker-image-builds\n+ cancel-in-progress: false\n+\n+jobs:\n+ latest-cpu:\n+ name: \"Latest Accelerate CPU [dev]\"\n+ runs-on: ubuntu-latest\n+ steps:\n+ - name: Set up Docker Buildx\n+ uses: docker/setup-buildx-action@v1\n+ - name: Check out code\n+ uses: actions/checkout@v2\n+ - name: Login to DockerHub\n+ uses: docker/login-action@v1\n+ with:\n+ username: ${{ secrets.DOCKERHUB_USERNAME }}\n+ password: ${{ secrets.DOCKERHUB_PASSWORD }}\n+\n+ - name: Build and Push CPU\n+ uses: docker/build-push-action@v2\n+ with:\n+ context: ./docker/accelerate-cpu\n+ push: true\n+ tags: huggingface/accelerate-cpu\n+\n+ latest-cuda:\n+ name: \"Latest Accelerate GPU [dev]\"\n+ runs-on: ubuntu-latest\n+ steps:\n+ - name: Set up Docker Buildx\n+ uses: docker/setup-buildx-action@v1\n+ - name: Check out code\n+ uses: actions/checkout@v2\n+ - name: Login to DockerHub\n+ uses: docker/login-action@v1\n+ with:\n+ username: ${{ secrets.DOCKERHUB_USERNAME }}\n+ password: ${{ secrets.DOCKERHUB_PASSWORD }}\n+\n+ - name: 
Build and Push GPU\n+ uses: docker/build-push-action@v2\n+ with:\n+ context: ./docker/accelerate-gpu\n+ push: true\n+ tags: huggingface/accelerate-gpu\n\\ No newline at end of file\ndiff --git a/docker/accelerate-cpu/Dockerfile b/docker/accelerate-cpu/Dockerfile\nindex 06379e69b..c536fb70c 100644\n--- a/docker/accelerate-cpu/Dockerfile\n+++ b/docker/accelerate-cpu/Dockerfile\n@@ -16,7 +16,6 @@ ENV VIRTUAL_ENV=/opt/venv\n RUN python3 -m venv ${VIRTUAL_ENV}\n # Make sure we use the virtualenv\n ENV PATH=\"${VIRTUAL_ENV}/bin:$PATH\"\n-\n WORKDIR /workspace\n # Install specific CPU torch wheel to save on space\n RUN python3 -m pip install --upgrade --no-cache-dir pip\n@@ -28,6 +27,8 @@ RUN python3 -m pip install --no-cache-dir \\\n # Stage 2\n FROM python:3.6-slim AS build-image\n COPY --from=compile-image /opt/venv /opt/venv\n+RUN useradd -ms /bin/bash user\n+USER user\n \n # Make sure we use the virtualenv\n ENV PATH=\"/opt/venv/bin:$PATH\"\ndiff --git a/docker/accelerate-gpu/Dockerfile b/docker/accelerate-gpu/Dockerfile\nindex b371042e1..299f573d8 100644\n--- a/docker/accelerate-gpu/Dockerfile\n+++ b/docker/accelerate-gpu/Dockerfile\n@@ -31,5 +31,8 @@ COPY --from=compile-image /opt/conda /opt/conda\n ENV PATH /opt/conda/bin:$PATH\n \n RUN echo \"source activate accelerate\" >> /.bashrc\n+\n+RUN useradd -ms /bin/bash user\n+USER user\n # Activate the virtualenv\n CMD [\"/bin/bash\"]\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/391", "pr_id": 945835312 }, { "diff": "diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml\nindex 45181a092..c98ce0bb1 100644\n--- a/.github/workflows/stale.yml\n+++ b/.github/workflows/stale.yml\n@@ -3,6 +3,7 @@ name: Stale Bot\n on:\n schedule:\n - cron: \"0 15 * * *\"\n+ workflow_dispatch:\n \n jobs:\n close_stale_issues:\n@@ -12,7 +13,7 @@ jobs:\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n steps:\n- - uses: actions/@checkoutv2\n+ - uses: actions/checkout@v2\n \n - name: Setup Python\n uses: actions/setup-python@v1\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/390", "pr_id": 945806096 }, { "diff": "diff --git a/examples/cv_example.py b/examples/cv_example.py\nindex a80b42d13..483bddac3 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -73,7 +73,7 @@ def __getitem__(self, idx):\n \n def training_function(config, args):\n # Initialize accelerator\n- accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mix_precision)\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n \n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n", "code_comments": [], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_388). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/388", "pr_id": 944804577 }, { "diff": "diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml\nnew file mode 100644\nindex 000000000..45181a092\n--- /dev/null\n+++ b/.github/workflows/stale.yml\n@@ -0,0 +1,27 @@\n+name: Stale Bot\n+\n+on:\n+ schedule:\n+ - cron: \"0 15 * * *\"\n+\n+jobs:\n+ close_stale_issues:\n+ name: Close Stale Issues\n+ if: github.repository == 'huggingface/accelerate'\n+ runs-on: ubuntu-latest\n+ env:\n+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n+ steps:\n+ - uses: actions/@checkoutv2\n+ \n+ - name: Setup Python\n+ uses: actions/setup-python@v1\n+ with:\n+ python-version: 3.7\n+ \n+ - name: Install requirements\n+ run: |\n+ pip install PyGithub\n+ - name: Close stale issues\n+ run: |\n+ python utils/stale.py\n\\ No newline at end of file\ndiff --git a/utils/stale.py b/utils/stale.py\nnew file mode 100644\nindex 000000000..1d8f9020b\n--- /dev/null\n+++ b/utils/stale.py\n@@ -0,0 +1,66 @@\n+# Copyright 2022 The HuggingFace Team, the AllenNLP library authors. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+Script to close stale issue. Taken in part from the AllenNLP repository.\n+https://github.com/allenai/allennlp.\n+\"\"\"\n+from datetime import datetime as dt\n+import os\n+\n+from github import Github\n+\n+\n+LABELS_TO_EXEMPT = [\n+ \"good first issue\",\n+ \"feature request\",\n+ \"wip\",\n+]\n+\n+\n+def main():\n+ g = Github(os.environ[\"GITHUB_TOKEN\"])\n+ repo = g.get_repo(\"huggingface/accelerate\")\n+ open_issues = repo.get_issues(state=\"open\")\n+\n+ for issue in open_issues:\n+ comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)\n+ last_comment = comments[0] if len(comments) > 0 else None\n+ current_time = dt.utcnow()\n+ days_since_updated = (current_time - issue.updated_at).days\n+ days_since_creation = (current_time - issue.created_at).days\n+ if (\n+ last_comment is not None and last_comment.user.login == \"github-actions[bot]\"\n+ and days_since_updated > 7\n+ and days_since_creation >= 30\n+ and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())\n+ ):\n+ # Close issue since it has been 7 days of inactivity since bot mention.\n+ issue.edit(state=\"closed\")\n+ elif (\n+ days_since_updated > 23\n+ and days_since_creation >= 30\n+ and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())\n+ ):\n+ # Add stale comment\n+ issue.create_comment(\n+ \"This issue has been automatically marked as stale because it has not had \"\n+ \"recent activity. 
If you think this still needs to be addressed \"\n+ \"please comment on this thread.\\n\\nPlease note that issues that do not follow the \"\n+ \"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) \"\n+ \"are likely to be ignored.\"\n+ )\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/387", "pr_id": 944801736 }, { "diff": "diff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\nindex 8efce8dea..1fac5f012 100644\n--- a/src/accelerate/scheduler.py\n+++ b/src/accelerate/scheduler.py\n@@ -61,7 +61,9 @@ def step(self, *args, **kwargs):\n # num_processes steps per training step\n num_processes = AcceleratorState().num_processes\n for _ in range(num_processes):\n- self.scheduler.step(*args, **kwargs)\n+ # Special case when using OneCycle and `drop_last` was not used\n+ if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:\n+ self.scheduler.step(*args, **kwargs)\n \n # Passthroughs\n def get_last_lr(self):\n", "code_comments": [ { "body": "Should we guard around the `last_epoch` attribute here as well somehow?", "diff_hunk": "@@ -61,7 +61,9 @@ def step(self, *args, **kwargs):\n # num_processes steps per training step\n num_processes = AcceleratorState().num_processes\n for _ in range(num_processes):\n- self.scheduler.step(*args, **kwargs)\n+ # Special case when using OneCycle and `drop_last` was not used\n+ if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:", "from_author": false }, { "body": "`last_epoch` is part of the base scheduler object, so it's always present: https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py#L24", "diff_hunk": "@@ -61,7 +61,9 @@ def step(self, *args, **kwargs):\n # num_processes steps per training step\n num_processes = AcceleratorState().num_processes\n for _ in range(num_processes):\n- self.scheduler.step(*args, **kwargs)\n+ # Special case when using OneCycle and `drop_last` was not used\n+ if getattr(self.scheduler, \"total_steps\", 0) <= self.scheduler.last_epoch:", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/385", "pr_id": 944612508 }, { "diff": "diff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex f65b6b115..19d8b81d4 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -40,10 +40,18 @@\n get_max_memory,\n infer_auto_device_map,\n load_checkpoint_in_model,\n+ load_offloaded_weights,\n named_module_tensors,\n set_module_tensor_to_device,\n )\n-from .offload import OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, offload_state_dict\n+from .offload import (\n+ OffloadedWeightsLoader,\n+ PrefixedDataset,\n+ extract_submodules_state_dict,\n+ offload_state_dict,\n+ offload_weight,\n+ save_offload_index,\n+)\n from .operations import (\n broadcast,\n broadcast_object_list,\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nindex 08c6f269d..0329f8786 100644\n--- a/src/accelerate/utils/modeling.py\n+++ b/src/accelerate/utils/modeling.py\n@@ -25,6 +25,8 @@\n import torch\n import torch.nn as nn\n \n+from .offload import 
offload_weight, save_offload_index\n+\n \n WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n \n@@ -312,6 +314,18 @@ def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], modul\n return device_map\n \n \n+def load_offloaded_weights(model, index, offload_folder):\n+ if index is None or len(index) == 0:\n+ # Nothing to do\n+ return\n+\n+ for param_name, metadata in index.items():\n+ tensor_file = os.path.join(offload_folder, f\"{param_name}.dat\")\n+ shape = tuple(metadata[\"shape\"])\n+ weight = np.memmap(tensor_file, dtype=metadata[\"dtype\"], mode=\"r\", shape=shape)\n+ set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\n+\n+\n def infer_auto_device_map(\n model: nn.Module,\n max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n@@ -581,20 +595,10 @@ def load_checkpoint_in_model(\n \n if param_device == \"disk\":\n set_module_tensor_to_device(model, param_name, \"meta\")\n- tensor_file = os.path.join(offload_folder, f\"{param_name}.dat\")\n- array = param.numpy()\n- offload_index[param_name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n- file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n- file_array[:] = array[:]\n- file_array.flush()\n+ offload_weight(param, param_name, offload_folder, index=offload_index)\n elif param_device == \"cpu\" and offload_state_dict:\n set_module_tensor_to_device(model, param_name, \"meta\")\n- tensor_file = os.path.join(state_dict_folder, f\"{param_name}.dat\")\n- array = param.numpy()\n- state_dict_index[param_name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n- file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n- file_array[:] = array[:]\n- file_array.flush()\n+ offload_weight(param, param_name, state_dict_folder, index=state_dict_index)\n else:\n set_module_tensor_to_device(model, param_name, param_device, value=param)\n \n@@ -602,23 +606,9 @@ def load_checkpoint_in_model(\n del checkpoint\n gc.collect()\n \n- if len(offload_index) > 0:\n- offload_index_file = os.path.join(offload_folder, \"index.json\")\n- if os.path.isfile(offload_index_file):\n- with open(offload_index_file, \"r\", encoding=\"utf-8\") as f:\n- current_offload_index = json.load(f)\n- else:\n- current_offload_index = {}\n- current_offload_index.update(offload_index)\n-\n- with open(offload_index_file, \"w\", encoding=\"utf-8\") as f:\n- json.dump(current_offload_index, f, indent=2)\n+ save_offload_index(offload_index, offload_folder)\n \n # Load back offloaded state dict on CPU\n- if offload_state_dict and len(state_dict_index) > 0:\n- for param_name, metadata in state_dict_index.items():\n- tensor_file = os.path.join(state_dict_folder, f\"{param_name}.dat\")\n- shape = tuple(metadata[\"shape\"])\n- weight = np.memmap(tensor_file, dtype=metadata[\"dtype\"], mode=\"r\", shape=shape)\n- set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\n+ if offload_state_dict:\n+ load_offloaded_weights(model, state_dict_index, state_dict_folder)\n shutil.rmtree(state_dict_folder)\ndiff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nindex 17f33d7c6..660bc521a 100644\n--- a/src/accelerate/utils/offload.py\n+++ b/src/accelerate/utils/offload.py\n@@ -54,6 +54,34 @@ def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str,\n json.dump(current_index, f, indent=2)\n \n \n+def offload_weight(weight, weight_name, offload_folder, index=None):\n+ array 
= weight.numpy()\n+ tensor_file = os.path.join(offload_folder, f\"{weight_name}.dat\")\n+ if index is not None:\n+ index[weight_name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n+ file_array[:] = array[:]\n+ file_array.flush()\n+ return index\n+\n+\n+def save_offload_index(index, offload_folder):\n+ if index is None or len(index) == 0:\n+ # Nothing to save\n+ return\n+\n+ offload_index_file = os.path.join(offload_folder, \"index.json\")\n+ if os.path.isfile(offload_index_file):\n+ with open(offload_index_file, \"r\", encoding=\"utf-8\") as f:\n+ current_index = json.load(f)\n+ else:\n+ current_index = {}\n+ current_index.update(index)\n+\n+ with open(offload_index_file, \"w\", encoding=\"utf-8\") as f:\n+ json.dump(current_index, f, indent=2)\n+\n+\n class PrefixedDataset(Mapping):\n \"\"\"\n Will access keys in a given dataset by adding a prefix.\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/380", "pr_id": 942858478 }, { "diff": "diff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nindex 3f10b83cf..b492f2f9c 100644\n--- a/src/accelerate/utils/imports.py\n+++ b/src/accelerate/utils/imports.py\n@@ -12,8 +12,15 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-\n import importlib\n+import sys\n+\n+\n+# The package importlib_metadata is in a different place, depending on the Python version.\n+if sys.version_info < (3, 8):\n+ import importlib_metadata\n+else:\n+ import importlib.metadata as importlib_metadata\n \n \n try:\n@@ -45,7 +52,15 @@ def is_tpu_available():\n \n \n def is_deepspeed_available():\n- return importlib.util.find_spec(\"deepspeed\") is not None\n+ package_exists = importlib.util.find_spec(\"deepspeed\") is not None\n+ # Check we're not importing a \"deepspeed\" directory somewhere but the actual library by trying to grab the version\n+ # AND checking it has an author field in the metadata that is HuggingFace.\n+ if package_exists:\n+ try:\n+ _ = importlib_metadata.metadata(\"deepspeed\")\n+ return True\n+ except importlib_metadata.PackageNotFoundError:\n+ return False\n \n \n def is_tensorflow_available():\n", "code_comments": [ { "body": "```suggestion\r\n```\r\nIf I'm not mistaken this is unnecessary. 
You can do `_deepspeed_metadata.get(\"version\")` if it's needed?", "diff_hunk": "@@ -45,7 +52,18 @@ def is_tpu_available():\n \n \n def is_deepspeed_available():\n- return importlib.util.find_spec(\"deepspeed\") is not None\n+ package_exists = importlib.util.find_spec(\"deepspeed\") is not None\n+ # Check we're not importing a \"deepspeed\" directory somewhere but the actual library by trying to grab the version\n+ # AND checking it has an author field in the metadata that is HuggingFace.\n+ if package_exists:\n+ try:\n+ _ = importlib_metadata.version(\"deepspeed\")", "from_author": false }, { "body": "`importlib_metadata.metadata` will also then raise the same error if it's not installed", "diff_hunk": "@@ -45,7 +52,18 @@ def is_tpu_available():\n \n \n def is_deepspeed_available():\n- return importlib.util.find_spec(\"deepspeed\") is not None\n+ package_exists = importlib.util.find_spec(\"deepspeed\") is not None\n+ # Check we're not importing a \"deepspeed\" directory somewhere but the actual library by trying to grab the version\n+ # AND checking it has an author field in the metadata that is HuggingFace.\n+ if package_exists:\n+ try:\n+ _ = importlib_metadata.version(\"deepspeed\")", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/379", "pr_id": 942801550 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex f283dfbc5..ff4522162 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -324,7 +324,7 @@ def deepspeed_launcher(args):\n current_env[\"USE_DEEPSPEED\"] = \"true\"\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n- current_env[\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device)\n+ current_env[\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device).lower()\n \n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/378", "pr_id": 942481321 }, { "diff": "diff --git a/docker/accelerate-cpu/Dockerfile b/docker/accelerate-cpu/Dockerfile\nnew file mode 100644\nindex 000000000..06379e69b\n--- /dev/null\n+++ b/docker/accelerate-cpu/Dockerfile\n@@ -0,0 +1,34 @@\n+# Builds CPU-only Docker image of PyTorch\n+# Uses multi-staged approach to reduce size\n+# Stage 1\n+FROM python:3.6-slim as compile-image\n+\n+ARG DEBIAN_FRONTEND=noninteractive\n+\n+RUN apt update\n+RUN apt-get install -y --no-install-recommends \\\n+ build-essential \\\n+ git \\\n+ gcc\n+\n+# Setup virtual environment for Docker\n+ENV VIRTUAL_ENV=/opt/venv\n+RUN python3 -m venv ${VIRTUAL_ENV}\n+# Make sure we use the virtualenv\n+ENV PATH=\"${VIRTUAL_ENV}/bin:$PATH\"\n+\n+WORKDIR /workspace\n+# Install specific CPU torch wheel to save on space\n+RUN python3 -m pip install --upgrade --no-cache-dir pip\n+RUN python3 -m pip install --no-cache-dir \\\n+ jupyter \\\n+ torch --extra-index-url https://download.pytorch.org/whl/cpu \\\n+ git+https://github.com/huggingface/accelerate#egg=accelerate[dev]\n+ \n+# Stage 2\n+FROM python:3.6-slim AS build-image\n+COPY --from=compile-image /opt/venv 
/opt/venv\n+\n+# Make sure we use the virtualenv\n+ENV PATH=\"/opt/venv/bin:$PATH\"\n+CMD [\"/bin/bash\"]\n\\ No newline at end of file\ndiff --git a/docker/accelerate-gpu/Dockerfile b/docker/accelerate-gpu/Dockerfile\nnew file mode 100644\nindex 000000000..b371042e1\n--- /dev/null\n+++ b/docker/accelerate-gpu/Dockerfile\n@@ -0,0 +1,35 @@\n+# Builds GPU docker image of PyTorch\n+# Uses multi-staged approach to reduce size\n+# Stage 1\n+# Use base conda image to reduce time\n+FROM continuumio/miniconda3:latest AS compile-image\n+# Specify py version\n+ENV PYTHON_VERSION=3.6 \n+# Install apt libs\n+RUN apt-get update && \\\n+ apt-get install -y curl git wget && \\\n+ apt-get clean && \\\n+ rm -rf /var/lib/apt/lists*\n+\n+# Create our conda env\n+RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip\n+# We don't install pytorch here yet since CUDA isn't available\n+# instead we use the direct torch wheel\n+ENV PATH /opt/conda/envs/accelerate/bin:$PATH\n+# Activate our bash shell\n+RUN chsh -s /bin/bash\n+SHELL [\"/bin/bash\", \"-c\"]\n+# Activate the conda env and install torch + accelerate\n+RUN source activate accelerate && \\\n+ python3 -m pip install --no-cache-dir \\\n+ torch --extra-index-url https://download.pytorch.org/whl/cu113 \\\n+ git+https://github.com/huggingface/accelerate#egg=accelerate[dev]\n+\n+# Stage 2\n+FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image\n+COPY --from=compile-image /opt/conda /opt/conda\n+ENV PATH /opt/conda/bin:$PATH\n+\n+RUN echo \"source activate accelerate\" >> /.bashrc\n+# Activate the virtualenv\n+CMD [\"/bin/bash\"]\n\\ No newline at end of file\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "I know nothing about Docker, so asking Lysandre for a review :-)", "from_author": false }, { "body": "Great to have docker support! Helpful for production scenarios. Few comments:\r\n1. The Python versions in CPU and GPU are different - 3.6 and 3.8, respectively. Would it be better to have same Python versions? Also, transformers will be discontinuing 3.6 support, so CPU version can be bumped up to match the GPU version?\r\n2. Shouldn't the Dockerfile's be in the top-level directory to be able to copy accelerate codebase and install it? Maybe have `Dockerfile_cpu` and `Dockerfile_gpu` in top-level directory?\r\n3. Getting below error while building GPU image :\r\n```\r\nW: GPG error: https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A4B469963BF863CC\r\nE: The repository 'https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 InRelease' is not signed.\r\n```\r\n", "from_author": false }, { "body": "> Great to have docker support! Helpful for production scenarios. Few comments:\r\n\r\nThese will also be used for having GPU and multi GPU tests, these mimic the transformers docker images for this exact purpose. (This is actually the true purpose behind this PR)\r\n\r\n> 1. The Python versions in CPU and GPU are different - 3.6 and 3.8, respectively. Would it be better to have same Python versions? Also, transformers will be discontinuing 3.6 support, so CPU version can be bumped up to match the GPU version?\r\n\r\nYes they are, it's a partial limitation of the GPU image. 
I could go through the effort of having it use conda instead, but Sylvain and I discussed that 3.6 support will be dropped on the next release. Since it's so soon, it's easier to just have it be this way. \r\n\r\n> 2. Shouldn't the Dockerfile's be in the top-level directory to be able to copy accelerate codebase and install it? Maybe have `Dockerfile_cpu` and `Dockerfile_gpu` in top-level directory?\r\n\r\nNope, these assume the git repo is the top level directory and then copy it. So when building the dockerfiles it looks something like (assuming from a fresh accelerate clone and cd'd to it) `docker build . -f docker/accelerate-cpu/Dockerfile`. See the transformers repo for a similar behavior\r\n\r\n> 3. Getting below error while building GPU image :\r\n> \r\n> \r\n> ```\r\n> W: GPG error: https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY A4B469963BF863CC\r\n> E: The repository 'https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 InRelease' is not signed.\r\n> ```\r\n\r\nUnsure with this one. Do you have docker cuda properly setup? IIRC you might need to find the right keys to use as well. I was able to build it just fine yesterday.\r\n\r\n", "from_author": true }, { "body": "Thinking on it more, having the python version be configurable in the cuda image would be nice. Will change this\r\n\r\nAlso would probably be better to do similar to this Dockerfile and clone the repo instead: https://github.com/huggingface/transformers/blob/main/docker/transformers-pytorch-gpu/Dockerfile#L11", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/377", "pr_id": 941945997 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex a827633c0..50aeb57b6 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -326,12 +326,21 @@ class DataLoaderDispatcher(DataLoader):\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ shuffle = False\n+ if version.parse(torch.__version__) >= version.parse(\"1.11.0\"):\n+ from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe\n+\n+ # We need to save the shuffling state of the DataPipe\n+ if isinstance(dataset, ShufflerIterDataPipe):\n+ shuffle = dataset._shuffle_enabled\n super().__init__(dataset, **kwargs)\n self.split_batches = split_batches\n if version.parse(torch.__version__) < version.parse(\"1.8.0\"):\n raise ImportError(\n \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\n )\n+ if shuffle:\n+ torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)\n \n def __iter__(self):\n state = AcceleratorState()\n", "code_comments": [ { "body": "This will break compatibility with older versions of PyTorch as I believe datapipes were just introduced in PyTorch 1.11.", "diff_hunk": "@@ -17,6 +17,7 @@\n \n import torch\n from torch.utils.data import BatchSampler, DataLoader, IterableDataset\n+from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe", "from_author": false }, { "body": "There should be a versioning test of PyTorch first, then this instance check. 
:-)", "diff_hunk": "@@ -326,12 +327,15 @@ class DataLoaderDispatcher(DataLoader):\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ shuffle = dataset._shuffle_enabled if isinstance(dataset, ShufflerIterDataPipe) else False", "from_author": false }, { "body": "```suggestion\r\n if version.parse(torch.__version__) >= version.parse(\"1.11.99\"):\r\n```\r\nJust in case we get many micro releases ;-)", "diff_hunk": "@@ -326,12 +326,21 @@ class DataLoaderDispatcher(DataLoader):\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ shuffle = False\n+ if version.parse(torch.__version__) >= version.parse(\"1.11.0\"):", "from_author": false }, { "body": "Do you mean `\"1.10.99\"` ?", "diff_hunk": "@@ -326,12 +326,21 @@ class DataLoaderDispatcher(DataLoader):\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ shuffle = False\n+ if version.parse(torch.__version__) >= version.parse(\"1.11.0\"):", "from_author": true }, { "body": "Arf, review before coffee. I read > and thought for a minute it needed PyTorch 1.12, you can ignore :-)", "diff_hunk": "@@ -326,12 +326,21 @@ class DataLoaderDispatcher(DataLoader):\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ shuffle = False\n+ if version.parse(torch.__version__) >= version.parse(\"1.11.0\"):", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/376", "pr_id": 941837902 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 6fd24c321..cb904a9b5 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -881,14 +881,15 @@ def get_state_dict(self, model):\n is_zero_3 = self.state.deepspeed_plugin.zero_stage == 3\n \n if is_zero_3:\n- state_dict = model._zero3_consolidated_fp16_state_dict()\n+ state_dict = model._zero3_consolidated_16bit_state_dict()\n else:\n model = self.unwrap_model(model)\n state_dict = model.state_dict()\n \n- for k in state_dict:\n- if state_dict[k].dtype == torch.float16:\n- state_dict[k] = state_dict[k].float()\n+ if state_dict is not None:\n+ for k in state_dict:\n+ if state_dict[k].dtype == torch.float16:\n+ state_dict[k] = state_dict[k].float()\n \n return state_dict\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/374", "pr_id": 941392425 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex ee0716b6d..06edd8540 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -103,6 +103,13 @@ def collate_fn(examples):\n return train_dataloader, eval_dataloader\n \n \n+# For testing only\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ from accelerate.test_utils.training import mocked_dataloaders\n+\n+ get_dataloaders = mocked_dataloaders # noqa: F811\n+\n+\n def training_function(config, args):\n # Initialize accelerator\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\ndiff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex 581c5fea0..8a289442d 100644\n--- 
a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -74,6 +74,13 @@ def __exit__(self, *exc):\n # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n \n \n+# For testing only\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ from accelerate.test_utils.training import mocked_dataloaders\n+\n+ get_dataloaders = mocked_dataloaders # noqa: F811\n+\n+\n def training_function(config, args):\n # Initialize accelerator\n if args.with_tracking:\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 6f3f3e0cb..140f14d75 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n import argparse\n+import os\n \n import torch\n from torch.utils.data import DataLoader\n@@ -103,6 +104,13 @@ def collate_fn(examples):\n return train_dataloader, eval_dataloader\n \n \n+# For testing only\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ from accelerate.test_utils.training import mocked_dataloaders\n+\n+ get_dataloaders = mocked_dataloaders # noqa: F811\n+\n+\n def training_function(config, args):\n # Initialize accelerator\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex db94d5773..6babf879a 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -13,6 +13,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n import argparse\n+import os\n \n import torch\n from torch.utils.data import DataLoader\n@@ -104,6 +105,13 @@ def collate_fn(examples):\n return train_dataloader, eval_dataloader\n \n \n+# For testing only\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ from accelerate.test_utils.training import mocked_dataloaders\n+\n+ get_dataloaders = mocked_dataloaders # noqa: F811\n+\n+\n def training_function(config, args):\n # Initialize accelerator\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 47e15c0cf..1ccc2c1b0 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -103,6 +103,13 @@ def collate_fn(examples):\n return train_dataloader, eval_dataloader\n \n \n+# For testing only\n+if os.environ.get(\"TESTING_MOCKED_DATALOADERS\", None) == \"1\":\n+ from accelerate.test_utils.training import mocked_dataloaders\n+\n+ get_dataloaders = mocked_dataloaders # noqa: F811\n+\n+\n def training_function(config, args):\n # Initialize Accelerator\n \ndiff --git a/src/accelerate/test_utils/training.py b/src/accelerate/test_utils/training.py\nindex 2ea86fbb1..c26587abe 100644\n--- a/src/accelerate/test_utils/training.py\n+++ b/src/accelerate/test_utils/training.py\n@@ -14,6 +14,11 @@\n \n import numpy as np\n import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate.utils.dataclasses import DistributedType\n+from datasets import load_dataset\n+from transformers import AutoTokenizer\n \n \n class RegressionDataset:\n@@ -43,3 +48,40 @@ def forward(self, x=None):\n print(f\"Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}\")\n self.first_batch = False\n return x * self.a + self.b\n+\n+\n+def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n+ datasets = load_dataset(\"csv\", data_files=data_files)\n+ label_list = datasets[\"train\"].unique(\"label\")\n+\n+ label_to_id = {v: i for i, v in enumerate(label_list)}\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(\n+ examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None, padding=\"max_length\"\n+ )\n+ if \"label\" in examples:\n+ outputs[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"sentence1\", \"sentence2\", \"label\"],\n+ )\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=2)\n+ eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=1)\n+\n+ return train_dataloader, eval_dataloader\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex b9759f8b5..af892f4eb 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -12,29 +12,19 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import ast\n import os\n-import sys\n+import re\n+import shutil\n+import subprocess\n import tempfile\n import unittest\n from unittest import mock\n \n-from torch.utils.data import DataLoader\n-\n-from accelerate import DistributedType\n from accelerate.test_utils.examples import compare_against_test\n from accelerate.test_utils.testing import TempDirTestCase, slow\n-from datasets import load_dataset\n-from transformers import AutoTokenizer\n-\n+from accelerate.utils import write_basic_config\n \n-SRC_DIRS = [os.path.abspath(os.path.join(\"examples\", \"by_feature\"))]\n-sys.path.extend(SRC_DIRS)\n-\n-if SRC_DIRS is not None:\n- import checkpointing\n- import cross_validation\n- import multi_process_metrics\n- import tracking\n \n # DataLoaders built from `test_samples/MRPC` for quick testing\n # Should mock `{script_name}.get_dataloaders` via:\n@@ -43,43 +33,6 @@\n EXCLUDE_EXAMPLES = [\"cross_validation.py\", \"multi_process_metrics.py\", \"memory.py\", \"fsdp_with_peak_mem_tracking.py\"]\n \n \n-def mocked_dataloaders(accelerator, batch_size: int = 16):\n- tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n- data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n- datasets = load_dataset(\"csv\", data_files=data_files)\n- label_list = datasets[\"train\"].unique(\"label\")\n-\n- label_to_id = {v: i for i, v in enumerate(label_list)}\n-\n- def tokenize_function(examples):\n- 
# max_length=None => use the model max length (it's actually the default)\n- outputs = tokenizer(\n- examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None, padding=\"max_length\"\n- )\n- if \"label\" in examples:\n- outputs[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n- return outputs\n-\n- # Apply the method we just defined to all the examples in all the splits of the dataset\n- tokenized_datasets = datasets.map(\n- tokenize_function,\n- batched=True,\n- remove_columns=[\"sentence1\", \"sentence2\", \"label\"],\n- )\n-\n- def collate_fn(examples):\n- # On TPU it's best to pad everything to the same length or training will be very slow.\n- if accelerator.distributed_type == DistributedType.TPU:\n- return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n- return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n-\n- # Instantiate dataloaders.\n- train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=2)\n- eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=1)\n-\n- return train_dataloader, eval_dataloader\n-\n-\n class ExampleDifferenceTests(unittest.TestCase):\n \"\"\"\n This TestCase checks that all of the `complete_*` scripts contain all of the\n@@ -159,88 +112,89 @@ def test_cv_examples(self):\n self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n \n \n+@mock.patch.dict(os.environ, {\"TESTING_MOCKED_DATALOADERS\": \"1\"})\n class FeatureExamplesTests(TempDirTestCase):\n clear_on_setup = False\n \n- @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ @classmethod\n+ def setUpClass(cls):\n+ super().setUpClass()\n+ cls._tmpdir = tempfile.mkdtemp()\n+ cls.configPath = os.path.join(cls._tmpdir, \"default_config.yml\")\n+\n+ write_basic_config(save_location=cls.configPath)\n+ cls._launch_args = [\"accelerate\", \"launch\", \"--config_file\", cls.configPath]\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ super().tearDownClass()\n+ shutil.rmtree(cls._tmpdir)\n+\n def test_checkpointing_by_epoch(self):\n testargs = f\"\"\"\n- checkpointing.py\n+ examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n \"\"\".split()\n- with mock.patch.object(sys, \"argv\", testargs):\n- checkpointing.main()\n- self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_1\")))\n+ _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_1\")))\n \n- @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n def test_checkpointing_by_steps(self):\n testargs = f\"\"\"\n- checkpointing.py\n+ examples/by_feature/checkpointing.py\n --checkpointing_steps 2\n --output_dir {self.tmpdir}\n \"\"\".split()\n- with mock.patch.object(sys, \"argv\", testargs):\n- checkpointing.main()\n- self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n+ _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE, env=os.environ)\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n \n- @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n def test_load_states_by_epoch(self):\n testargs = f\"\"\"\n- checkpointing.py\n+ examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, \"epoch_1\")}\n \"\"\".split()\n- dummy_results = 
{\"accuracy\": mock.ANY, \"f1\": mock.ANY}\n- with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n- with mock.patch.object(sys, \"argv\", testargs):\n- checkpointing.main()\n- with self.assertRaises(AssertionError):\n- mocked_print.assert_any_call(\"epoch 0:\", dummy_results)\n- with self.assertRaises(AssertionError):\n- mocked_print.assert_any_call(\"epoch 1:\", dummy_results)\n- mocked_print.assert_any_call(\"epoch 2:\", dummy_results)\n-\n- @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ output = subprocess.run(\n+ self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n+ ).stdout\n+ self.assertNotIn(\"epoch 0:\", output)\n+ self.assertNotIn(\"epoch 1:\", output)\n+ self.assertIn(\"epoch 2:\", output)\n+\n def test_load_states_by_steps(self):\n testargs = f\"\"\"\n- checkpointing.py\n+ examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_4\")}\n \"\"\".split()\n- dummy_results = {\"accuracy\": mock.ANY, \"f1\": mock.ANY}\n- with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n- with mock.patch.object(sys, \"argv\", testargs):\n- checkpointing.main()\n- with self.assertRaises(AssertionError):\n- mocked_print.assert_any_call(\"epoch 0:\", dummy_results)\n- mocked_print.assert_any_call(\"epoch 1:\", dummy_results)\n- mocked_print.assert_any_call(\"epoch 2:\", dummy_results)\n+ output = subprocess.run(\n+ self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n+ ).stdout\n+ self.assertNotIn(\"epoch 0:\", output)\n+ self.assertIn(\"epoch 1:\", output)\n+ self.assertIn(\"epoch 2:\", output)\n \n @slow\n def test_cross_validation(self):\n testargs = \"\"\"\n- cross_validation.py\n+ examples/by_feature/cross_validation.py\n --num_folds 2\n \"\"\".split()\n- with mock.patch.object(sys, \"argv\", testargs):\n- with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n- cross_validation.main()\n- call = mocked_print.mock_calls[-1]\n- self.assertGreaterEqual(call.args[1][\"accuracy\"], 0.75)\n+ with mock.patch.dict(os.environ, {\"TESTING_MOCKED_DATALOADERS\": \"0\"}):\n+ output = subprocess.run(\n+ self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n+ ).stdout\n+ results = ast.literal_eval(re.findall(\"({.+})\", output)[-1])\n+ self.assertGreaterEqual(results[\"accuracy\"], 0.75)\n \n- @mock.patch(\"multi_process_metrics.get_dataloaders\", mocked_dataloaders)\n def test_multi_process_metrics(self):\n- testargs = [\"multi_process_metrics.py\"]\n- with mock.patch.object(sys, \"argv\", testargs):\n- multi_process_metrics.main()\n+ testargs = [\"examples/by_feature/multi_process_metrics.py\"]\n+ _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n \n- @mock.patch(\"tracking.get_dataloaders\", mocked_dataloaders)\n def test_tracking(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n testargs = f\"\"\"\n- tracking.py\n+ examples/by_feature/tracking.py\n --with_tracking\n --logging_dir {tmpdir}\n \"\"\".split()\n- with mock.patch.object(sys, \"argv\", testargs):\n- tracking.main()\n- self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\n+ _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)\n+ self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\n", "code_comments": [ { "body": "Since it's for testing only, maybe add it in the name of the env variable? 
`TESTING_MOCKED_DATALOADERS` for instance.", "diff_hunk": "@@ -103,6 +103,13 @@ def collate_fn(examples):\n return train_dataloader, eval_dataloader\n \n \n+# For testing only\n+if os.environ.get(\"USE_MOCKED_DATALOADERS\", None) == \"1\":", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -74,6 +74,13 @@ def __exit__(self, *exc):\n # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n \n \n+# For testing only\n+if os.environ.get(\"USE_MOCKED_DATALOADERS\", None) == \"1\":", "from_author": false }, { "body": "```suggestion\r\n super().setUpClass()\r\n```\r\nWe're not in Python 2 ;-)", "diff_hunk": "@@ -159,88 +112,89 @@ def test_cv_examples(self):\n self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n \n \n+@mock.patch.dict(os.environ, {\"USE_MOCKED_DATALOADERS\": \"1\"})\n class FeatureExamplesTests(TempDirTestCase):\n clear_on_setup = False\n \n- @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ @classmethod\n+ def setUpClass(cls):\n+ super(FeatureExamplesTests, cls).setUpClass()", "from_author": false }, { "body": "```suggestion\r\n super().tearDownClass()\r\n```", "diff_hunk": "@@ -159,88 +112,89 @@ def test_cv_examples(self):\n self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n \n \n+@mock.patch.dict(os.environ, {\"USE_MOCKED_DATALOADERS\": \"1\"})\n class FeatureExamplesTests(TempDirTestCase):\n clear_on_setup = False\n \n- @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ @classmethod\n+ def setUpClass(cls):\n+ super(FeatureExamplesTests, cls).setUpClass()\n+ cls._tmpdir = tempfile.mkdtemp()\n+ cls.configPath = os.path.join(cls._tmpdir, \"default_config.yml\")\n+\n+ write_basic_config(save_location=cls.configPath)\n+ cls._launch_args = [\"accelerate\", \"launch\", \"--config_file\", cls.configPath]\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ super(FeatureExamplesTests, cls).tearDownClass()", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/373", "pr_id": 940534130 }, { "diff": "diff --git a/docs/source/internal.mdx b/docs/source/internal.mdx\nindex 94803a62f..9b496b30e 100644\n--- a/docs/source/internal.mdx\n+++ b/docs/source/internal.mdx\n@@ -67,3 +67,5 @@ The main work on your PyTorch `DataLoader` is done by the following function:\n [[autodoc]] utils.synchronize_rng_states\n \n [[autodoc]] utils.wait_for_everyone\n+\n+[[autodoc]] utils.write_basic_config\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex 58b33c1ad..f65b6b115 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -72,5 +72,12 @@\n \n from .launch import PrepareForLaunch\n from .memory import find_executable_batch_size\n-from .other import extract_model_from_parallel, get_pretty_name, patch_environment, save, wait_for_everyone\n+from .other import (\n+ extract_model_from_parallel,\n+ get_pretty_name,\n+ patch_environment,\n+ save,\n+ wait_for_everyone,\n+ write_basic_config,\n+)\n from .random import set_seed, synchronize_rng_state, synchronize_rng_states\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\nindex 35c96a21a..0d4f5b7e2 100644\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -14,9 +14,12 @@\n \n import os\n from contextlib import contextmanager\n+from pathlib import 
Path\n \n import torch\n \n+from ..commands.config.cluster import ClusterConfig\n+from ..commands.config.config_args import default_json_config_file\n from ..state import AcceleratorState\n from .dataclasses import DistributedType\n from .imports import is_deepspeed_available, is_tpu_available\n@@ -109,3 +112,45 @@ def get_pretty_name(obj):\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)\n+\n+\n+def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file):\n+ \"\"\"\n+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\n+ set CPU if it is a CPU-only machine.\n+\n+ Args:\n+ mixed_precision (`str`, *optional*, defaults to \"no\"):\n+ Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\n+ save_location (`str`, *optional*, defaults to `default_json_config_file`):\n+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default\n+ location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting\n+ the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.\n+ \"\"\"\n+ path = Path(save_location)\n+ path.parent.mkdir(parents=True, exist_ok=True)\n+ if path.exists():\n+ print(\n+ f\"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.\"\n+ )\n+ return\n+ mixed_precision = mixed_precision.lower()\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ raise ValueError(f\"`mixed_precision` should be one of 'no', 'fp16', or 'bf16'. Received {mixed_precision}\")\n+ config = {\"compute_environment\": \"LOCAL_MACHINE\", \"mixed_precision\": mixed_precision}\n+ if torch.cuda.is_available():\n+ num_gpus = torch.cuda.device_count()\n+ config[\"num_processes\"] = num_gpus\n+ config[\"use_cpu\"] = False\n+ if num_gpus > 1:\n+ config[\"distributed_type\"] = \"MULTI_GPU\"\n+ else:\n+ config[\"distributed_type\"] = \"NO\"\n+ else:\n+ num_gpus = 0\n+ config[\"use_cpu\"] = True\n+ config[\"num_processes\"] = 1\n+ config[\"distributed_type\"] = \"NO\"\n+ if not path.exists():\n+ config = ClusterConfig(**config)\n+ config.to_json_file(path)\n", "code_comments": [ { "body": "This utility should take an optional path to store the config (you can pass any config file to `accelerate launch` with the arg `--config_file`.", "diff_hunk": "@@ -109,3 +112,41 @@ def get_pretty_name(obj):\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)\n+\n+\n+def write_basic_config(mixed_precision=\"no\"):\n+ \"\"\"\n+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\n+ set CPU if it is a CPU-only machine.\n+\n+ Args:\n+ mixed_precision (`str`, *optional*, defaults to \"no\"):\n+ Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\n+ \"\"\"\n+ path = Path(default_json_config_file)\n+ path.parent.mkdir(parents=True, exist_ok=True)\n+ if path.exists():\n+ print(\n+ \"User configuration already setup, will not override existing configuration. 
Run `accelerate config` manually.\"\n+ )\n+ return", "from_author": false }, { "body": "This is the default already, so I don't think this part is useful.", "diff_hunk": "@@ -109,3 +112,45 @@ def get_pretty_name(obj):\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)\n+\n+\n+def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file):\n+ \"\"\"\n+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\n+ set CPU if it is a CPU-only machine.\n+\n+ Args:\n+ mixed_precision (`str`, *optional*, defaults to \"no\"):\n+ Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\n+ save_location (`str`, *optional*):\n+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`.\n+ \"\"\"\n+ if save_location is None:\n+ save_location = default_json_config_file", "from_author": false }, { "body": "The default should be indicated there.", "diff_hunk": "@@ -109,3 +112,45 @@ def get_pretty_name(obj):\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)\n+\n+\n+def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_config_file):\n+ \"\"\"\n+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also\n+ set CPU if it is a CPU-only machine.\n+\n+ Args:\n+ mixed_precision (`str`, *optional*, defaults to \"no\"):\n+ Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\n+ save_location (`str`, *optional*):\n+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`.", "from_author": false }, { "body": "This won't be the default location for everyone, so I'd just say defaults to the default config path and expand a bit in the docstring that it's inside the default cache of hugging face, and may add which env variable control it.", "diff_hunk": "@@ -122,11 +122,9 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\n Args:\n mixed_precision (`str`, *optional*, defaults to \"no\"):\n Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\n- save_location (`str`, *optional*):\n+ save_location (`str`, *optional*, defaults to \"~/.cache/huggingface/accelerate/default_config.yaml\"):", "from_author": false }, { "body": "```suggestion\r\n location is inside the huggingface cache folder (`~/.cache/huggingface` but can be overriden by setting the \r\n `HF_HOME` environmental variable) followed by `accelerate/default_config.yaml`.\r\n```", "diff_hunk": "@@ -122,8 +122,10 @@ def write_basic_config(mixed_precision=\"no\", save_location: str = default_json_c\n Args:\n mixed_precision (`str`, *optional*, defaults to \"no\"):\n Mixed Precision to use. Should be one of \"no\", \"fp16\", or \"bf16\"\n- save_location (`str`, *optional*, defaults to \"~/.cache/huggingface/accelerate/default_config.yaml\"):\n- Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`.\n+ save_location (`str`, *optional*, defaults to `default_json_config_file`):\n+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. 
Default\n+ location is inside the huggingface cache folder as default_config.yaml, can be overriden by setting the \n+ `HF_HOME` environmental variable.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/371", "pr_id": 940120765 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 6fd24c321..b9780330a 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -881,7 +881,7 @@ def get_state_dict(self, model):\n is_zero_3 = self.state.deepspeed_plugin.zero_stage == 3\n \n if is_zero_3:\n- state_dict = model._zero3_consolidated_fp16_state_dict()\n+ state_dict = model._zero3_consolidated_16bit_state_dict()\n else:\n model = self.unwrap_model(model)\n state_dict = model.state_dict()\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "> Thanks for fixing! Should we have a minimal version of DeepSpeed? I don't think this was always supported.\r\n\r\nHello, yes, we should have minimal version of DeepSpeed. I will club it with all the other changes that I am planning to have to improve the DeeSpeed integration.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/370", "pr_id": 939591326 }, { "diff": "diff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex b0e2c04a2..b043d5d20 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -102,7 +102,7 @@ def save_accelerator_state(\n states[\"torch_cuda_manual_seed\"] = torch.cuda.get_rng_state_all()\n # ^^ safe to call this function even if cuda is not available\n if is_tpu_available():\n- states[\"xm_seed\"] = torch.tensor(xm.get_rng_state())\n+ states[\"xm_seed\"] = xm.get_rng_state()\n output_states_file = os.path.join(output_dir, states_name)\n torch.save(states, output_states_file)\n logger.info(f\"Random states saved in {output_states_file}\")\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/365", "pr_id": 936006399 }, { "diff": "diff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex cfd8929d9..d3cd2e1a2 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -80,7 +80,7 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n else:\n if num_processes is None:\n raise ValueError(\n- \"You have to specify the number of GPUs you would like to use, add `num_process=...` to your call.\"\n+ \"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.\"\n )\n \n if num_processes > 1:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/363", "pr_id": 935487633 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex a8546c3c8..581c5fea0 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -103,11 +103,12 @@ 
def training_function(config, args):\n \n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n- run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n- accelerator.print(run)\n- accelerator.init_trackers(run, config)\n+ if accelerator.is_main_process:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.print(run)\n+ accelerator.init_trackers(run, config)\n \n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n datasets = load_dataset(\"glue\", \"mrpc\")\n@@ -293,7 +294,7 @@ def collate_fn(examples):\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss,\n+ \"train_loss\": total_loss.item(),\n },\n step=epoch,\n )\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 19f8e9dea..47e15c0cf 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -161,10 +161,11 @@ def training_function(config, args):\n # New Code #\n # We need to initalize the trackers we use. Overall configurations can also be stored\n if args.with_tracking:\n- run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n- accelerator.init_trackers(run, config)\n+ if accelerator.is_main_process:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n # Now we train the model\n for epoch in range(num_epochs):\n@@ -208,15 +209,16 @@ def training_function(config, args):\n \n # New Code #\n # To actually log, we call `Accelerator.log`\n- # The values passed can be of `str`, `int`, or `float`\n+ # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`\n if args.with_tracking:\n accelerator.log(\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss,\n+ \"train_loss\": total_loss.item(),\n \"epoch\": epoch,\n- }\n+ },\n+ step=epoch,\n )\n \n # New Code #\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 79e9289b8..fd830442d 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -104,10 +104,11 @@ def training_function(config, args):\n \n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n- run = os.path.split(__file__)[-1].split(\".\")[0]\n- if args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n- accelerator.init_trackers(run, config)\n+ if accelerator.is_main_process:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n # Grab all the image filenames\n file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(\".jpg\")]\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex a231566dc..abf85c8d9 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -81,10 +81,11 @@ def training_function(config, args):\n \n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n- run = os.path.split(__file__)[-1].split(\".\")[0]\n- if 
args.logging_dir:\n- run = os.path.join(args.logging_dir, run)\n- accelerator.init_trackers(run, config)\n+ if accelerator.is_main_process:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n datasets = load_dataset(\"glue\", \"mrpc\")\n@@ -245,9 +246,10 @@ def collate_fn(examples):\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss,\n+ \"train_loss\": total_loss.item(),\n \"epoch\": epoch,\n- }\n+ },\n+ step=epoch,\n )\n \n if checkpointing_steps == \"epoch\":\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 218ddf2df..3efff77c9 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -136,8 +136,9 @@ def log(self, values: dict, step: Optional[int] = None):\n Logs `values` to the current run.\n \n Args:\n- values (Dictionary `str` to `str`, `float`, or `int`):\n- Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of\n+ `str` to `float`/`int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n \"\"\"\n@@ -146,6 +147,8 @@ def log(self, values: dict, step: Optional[int] = None):\n self.writer.add_scalar(k, v, global_step=step)\n elif isinstance(v, str):\n self.writer.add_text(k, v, global_step=step)\n+ elif isinstance(v, dict):\n+ self.writer.add_scalars(k, v, global_step=step)\n self.writer.flush()\n logger.info(\"Successfully logged to TensorBoard\")\n \n@@ -170,7 +173,7 @@ class WandBTracker(GeneralTracker):\n \n def __init__(self, run_name: str):\n self.run_name = run_name\n- self.run = wandb.init(self.run_name)\n+ self.run = wandb.init(project=self.run_name)\n logger.info(f\"Initialized WandB project {self.run_name}\")\n logger.info(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n@@ -193,8 +196,9 @@ def log(self, values: dict, step: Optional[int] = None):\n Logs `values` to the current run.\n \n Args:\n- values (Dictionary `str` to `str`, `float`, or `int`):\n- Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of\n+ `str` to `float`/`int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n \"\"\"\n@@ -247,14 +251,21 @@ def log(self, values: dict, step: Optional[int] = None):\n Logs `values` to the current run.\n \n Args:\n- values (Dictionary `str` to `str`, `float`, or `int`):\n- Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of\n+ `str` to `float`/`int`.\n step (`int`, *optional*):\n The run step. 
If included, the log will be affiliated with this step.\n \"\"\"\n if step is not None:\n self.writer.set_step(step)\n- self.writer.log_others(values)\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ self.writer.log_metric(k, v, step=step)\n+ elif isinstance(v, str):\n+ self.writer.log_other(k, v)\n+ elif isinstance(v, dict):\n+ self.writer.log_metrics(v, step=step)\n logger.info(\"Successfully logged to CometML\")\n \n def finish(self):\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 84b6adaef..b9759f8b5 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -149,9 +149,10 @@ def test_cv_examples(self):\n \" \" * 16 + \"{\\n\\n\",\n \" \" * 18 + '\"accuracy\": eval_metric[\"accuracy\"],\\n\\n',\n \" \" * 18 + '\"f1\": eval_metric[\"f1\"],\\n\\n',\n- \" \" * 18 + '\"train_loss\": total_loss,\\n\\n',\n+ \" \" * 18 + '\"train_loss\": total_loss.item(),\\n\\n',\n \" \" * 18 + '\"epoch\": epoch,\\n\\n',\n- \" \" * 16 + \"}\\n\",\n+ \" \" * 16 + \"},\\n\\n\",\n+ \" \" * 16 + \"step=epoch,\\n\",\n \" \" * 8,\n ]\n self.one_complete_example(\"complete_cv_example.py\", True, cv_path, special_strings)\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex e9a2170ee..f02db8d21 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -203,6 +203,9 @@ def get_value_from_key(log_list, key: str, is_param: bool = False):\n if \"log_other\" in j.keys():\n if j[\"log_other\"][\"key\"] == key:\n return j[\"log_other\"][\"val\"]\n+ if \"metric\" in j.keys():\n+ if j[\"metric\"][\"metricName\"] == key:\n+ return j[\"metric\"][\"metricValue\"]\n \n def test_init_trackers(self):\n with tempfile.TemporaryDirectory() as d:\n", "code_comments": [ { "body": "Do we really have either a Tensor or something else? 
Can't it always be `.item()` here?", "diff_hunk": "@@ -293,7 +294,7 @@ def collate_fn(examples):\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss,\n+ \"train_loss\": total_loss.item() if type(total_loss) == torch.Tensor else total_loss,", "from_author": false }, { "body": "Same comment", "diff_hunk": "@@ -208,15 +209,16 @@ def training_function(config, args):\n \n # New Code #\n # To actually log, we call `Accelerator.log`\n- # The values passed can be of `str`, `int`, or `float`\n+ # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`\n if args.with_tracking:\n accelerator.log(\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss,\n+ \"train_loss\": total_loss.item() if type(total_loss) == torch.Tensor else total_loss,", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -245,9 +246,10 @@ def collate_fn(examples):\n {\n \"accuracy\": eval_metric[\"accuracy\"],\n \"f1\": eval_metric[\"f1\"],\n- \"train_loss\": total_loss,\n+ \"train_loss\": total_loss.item() if type(total_loss) == torch.Tensor else total_loss,", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Oh, also make sure to edit the title of the PR ;-)", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/361", "pr_id": 934934402 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex e3ce3fd62..f283dfbc5 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -33,6 +33,8 @@\n PrepareForLaunch,\n is_sagemaker_available,\n )\n+from accelerate.utils.versions import torch_version\n+from packaging import version\n \n \n def launch_command_parser(subparsers=None):\n@@ -131,6 +133,12 @@ def launch_command_parser(subparsers=None):\n action=\"store_true\",\n help=\"Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.\",\n )\n+ parser.add_argument(\n+ \"--num_cpu_threads_per_process\",\n+ type=int,\n+ default=1,\n+ help=\"The number of CPU threads per process. 
Can be tuned for optimal performance.\",\n+ )\n parser.add_argument(\n \"--aws_access_key_id\",\n type=str,\n@@ -211,7 +219,12 @@ def simple_launcher(args):\n \n \n def multi_gpu_launcher(args):\n- cmd = [sys.executable, \"-m\", \"torch.distributed.launch\", \"--use_env\"]\n+ if torch_version >= version.parse(\"1.10.0\"):\n+ cmd = [\"torchrun\"]\n+ elif torch_version >= version.parse(\"1.9.0\"):\n+ cmd = [sys.executable, \"-m\", \"torch.distributed.run\"]\n+ else:\n+ cmd = [sys.executable, \"-m\", \"torch.distributed.launch\", \"--use_env\"]\n if args.num_machines > 1:\n cmd.extend(\n [\n@@ -259,6 +272,7 @@ def multi_gpu_launcher(args):\n current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.offload_params).lower()\n current_env[\"FSDP_MIN_NUM_PARAMS\"] = str(args.min_num_params)\n current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.sharding_strategy)\n+ current_env[\"OMP_NUM_THREADS\"] = str(args.num_cpu_threads_per_process)\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n if process.returncode != 0:\ndiff --git a/src/accelerate/utils/versions.py b/src/accelerate/utils/versions.py\nnew file mode 100644\nindex 000000000..36dd7ac48\n--- /dev/null\n+++ b/src/accelerate/utils/versions.py\n@@ -0,0 +1,25 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import sys\n+\n+from packaging.version import parse\n+\n+\n+if sys.version_info < (3, 8):\n+ import importlib_metadata\n+else:\n+ import importlib.metadata as importlib_metadata\n+\n+torch_version = parse(importlib_metadata.version(\"torch\"))\n", "code_comments": [ { "body": "No need to make it private since we reuse it.", "diff_hunk": "@@ -0,0 +1,25 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import sys\n+\n+from packaging.version import parse\n+\n+\n+if sys.version_info < (3, 8):\n+ import importlib_metadata\n+else:\n+ import importlib.metadata as importlib_metadata\n+\n+_torch_version = parse(importlib_metadata.version(\"torch\"))", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/360", "pr_id": 934880222 }, { "diff": "diff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex b74c683b4..cb38abce6 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -10,6 +10,7 @@\n DeepSpeedPlugin,\n DistributedDataParallelKwargs,\n DistributedType,\n+ FullyShardedDataParallelPlugin,\n GradScalerKwargs,\n InitProcessGroupKwargs,\n find_executable_batch_size,\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex ba34587e2..6fd24c321 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -163,15 +163,18 @@ def __init__(\n assert isinstance(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n+ os.environ[\"USE_DEEPSPEED\"] = \"true\" # use DeepSpeed if plugin is provided\n+\n+ if fsdp_plugin is None: # init from env variables\n+ fsdp_plugin = FullyShardedDataParallelPlugin() if os.environ.get(\"USE_FSDP\", \"false\") == \"true\" else None\n+ else:\n+ if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\n+ raise TypeError(\"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\")\n+ os.environ[\"USE_FSDP\"] = \"true\" # use FSDP if plugin is provided\n \n if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")\n- if fsdp_plugin is None: # init from env variables\n- fsdp_plugin = FullyShardedDataParallelPlugin()\n- else:\n- if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\n- raise TypeError(\"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\")\n \n # Kwargs handlers\n self.ddp_handler = None\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Thanks a lot!! 
@sgugger @pacman100 ", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/356", "pr_id": 933112670 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex e39d3ede8..a8546c3c8 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -263,6 +263,7 @@ def collate_fn(examples):\n # context manager to track the peak memory usage during the evaluation\n with TorchTracemalloc() as tracemalloc:\n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n@@ -270,7 +271,15 @@ def collate_fn(examples):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather(\n+ (predictions, batch[\"labels\"])\n+ ) # If we are in a multiprocess environment, the last batch has duplicates\n+ if accelerator.num_processes > 1:\n+ if step == len(eval_dataloader) - 1:\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex 1b5bf1d58..db94d5773 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -177,21 +177,13 @@ def training_function(config, args):\n # First we check if it's a distributed system\n if accelerator.num_processes > 1:\n # Then see if we're on the last batch of our eval dataloader\n- if step == len(eval_dataloader):\n+ if step == len(eval_dataloader) - 1:\n # Last batch needs to be truncated on distributed systems as it contains additional samples\n predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n references = references[: len(eval_dataloader.dataset) - samples_seen]\n else:\n # Otherwise we add the number of samples seen\n samples_seen += references.shape[0]\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n- # If we are in a multiprocess environment, the last batch has duplicates\n- if accelerator.num_processes > 1:\n- if step == len(eval_dataloader):\n- predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n- references = references[: len(eval_dataloader.dataset) - samples_seen]\n- else:\n- samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex adb46d957..79e9289b8 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -242,7 +242,7 @@ def training_function(config, args):\n predictions = outputs.argmax(dim=-1)\n predictions, references = accelerator.gather((predictions, batch[\"label\"]))\n if accelerator.num_processes > 1:\n- if step == len(eval_dataloader):\n+ if step == len(eval_dataloader) - 1:\n predictions = predictions[: len(eval_dataloader) - samples_seen]\n references = references[: len(eval_dataloader) - samples_seen]\n 
else:\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex d4c8ddf0d..a231566dc 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -227,7 +227,7 @@ def collate_fn(examples):\n (predictions, batch[\"labels\"])\n ) # If we are in a multiprocess environment, the last batch has duplicates\n if accelerator.num_processes > 1:\n- if step == len(eval_dataloader):\n+ if step == len(eval_dataloader) - 1:\n predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n references = references[: len(eval_dataloader.dataset) - samples_seen]\n else:\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/355", "pr_id": 932032919 }, { "diff": "diff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nindex abba1a746..1b5bf1d58 100644\n--- a/examples/by_feature/multi_process_metrics.py\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -184,6 +184,14 @@ def training_function(config, args):\n else:\n # Otherwise we add the number of samples seen\n samples_seen += references.shape[0]\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ # If we are in a multiprocess environment, the last batch has duplicates\n+ if accelerator.num_processes > 1:\n+ if step == len(eval_dataloader):\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 80a083a8d..adb46d957 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -232,7 +232,7 @@ def training_function(config, args):\n accelerator.save_state(output_dir)\n model.eval()\n accurate = 0\n- num_elems = 0\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n@@ -240,11 +240,19 @@ def training_function(config, args):\n with torch.no_grad():\n outputs = model(inputs)\n predictions = outputs.argmax(dim=-1)\n- accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n- num_elems += accurate_preds.shape[0]\n+ predictions, references = accelerator.gather((predictions, batch[\"label\"]))\n+ if accelerator.num_processes > 1:\n+ if step == len(eval_dataloader):\n+ predictions = predictions[: len(eval_dataloader) - samples_seen]\n+ references = references[: len(eval_dataloader) - samples_seen]\n+ else:\n+ samples_seen += references.shape[0]\n+ else:\n+ samples_seen += references.shape[0]\n+ accurate_preds = predictions == references\n accurate += accurate_preds.long().sum()\n \n- eval_metric = accurate.item() / num_elems\n+ eval_metric = accurate.item() / samples_seen\n # Use accelerator.print to print only on the main process.\n accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n if args.with_tracking:\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 1910a4f83..d4c8ddf0d 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -215,6 
+215,7 @@ def collate_fn(examples):\n accelerator.save_state(output_dir)\n \n model.eval()\n+ samples_seen = 0\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n@@ -222,7 +223,15 @@ def collate_fn(examples):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n # It is slightly faster to call this once, than multiple times\n- predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ predictions, references = accelerator.gather(\n+ (predictions, batch[\"labels\"])\n+ ) # If we are in a multiprocess environment, the last batch has duplicates\n+ if accelerator.num_processes > 1:\n+ if step == len(eval_dataloader):\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ samples_seen += references.shape[0]\n metric.add_batch(\n predictions=predictions,\n references=references,\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/352", "pr_id": 931292768 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 8ff69e048..01a55e646 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -133,17 +133,24 @@ def get_cluster_input():\n main_training_function = \"main\"\n \n if distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.TPU]:\n- machine_type = str(distributed_type).split(\".\")[0].replace(\"MULTI_\", \"\")\n+ machine_type = str(distributed_type).split(\".\")[1].replace(\"MULTI_\", \"\")\n if machine_type == \"TPU\":\n machine_type += \" cores\"\n else:\n- machine_type += \"'s\"\n+ machine_type += \"(s)\"\n num_processes = _ask_field(\n f\"How many {machine_type} should be used for distributed training? [1]:\",\n lambda x: int(x),\n default=1,\n error_message=\"Please enter an integer.\",\n )\n+ elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED]:\n+ num_processes = _ask_field(\n+ \"How many GPU(s) should be used for distributed training? [1]:\",\n+ lambda x: int(x),\n+ default=1,\n+ error_message=\"Please enter an integer.\",\n+ )\n else:\n num_processes = 1\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/347", "pr_id": 929624461 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex d35024d66..130659196 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -7,6 +7,8 @@\n title: Installation\n title: Get started\n - sections:\n+ - local: big_modeling\n+ title: Handling big models\n - local: sagemaker\n title: Amazon SageMaker\n title: Guides\ndiff --git a/docs/source/big_modeling.mdx b/docs/source/big_modeling.mdx\nnew file mode 100644\nindex 000000000..4ca316870\n--- /dev/null\n+++ b/docs/source/big_modeling.mdx\n@@ -0,0 +1,232 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Handling big models\n+\n+When loading a pretrained model in PyTorch, the usual workflow looks like this:\n+\n+```py\n+import torch\n+\n+my_model = ModelClass(...)\n+state_dict = torch.load(checkpoint_file)\n+my_model.load_state_dict(state_dict)\n+```\n+\n+In plain English, those steps are:\n+1. Create the model with randomly initialized weights\n+2. Load the model weights (in a dictionary usually called a state dict) from the disk\n+3. Load those weights inside the model\n+\n+While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pretrained weights. If you're loading a model with 6 billion parameters, this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of it to load the model in FP16).\n+\n+<Tip warning={true}>\n+\n+This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future.\n+\n+</Tip>\n+\n+## Instantiating an empty model\n+\n+The first tool Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM, so that step 1 can be done on models of any size. Here is how it works:\n+\n+```py\n+from accelerate import init_empty_weights\n+\n+with init_empty_weights():\n+ my_model = ModelClass(...)\n+```\n+\n+For instance:\n+\n+```py\n+with init_empty_weights():\n+ model = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+```\n+\n+initializes an empty model with a bit more than 100B parameters. Behind the scenes, this relies on the meta device introduced in PyTorch 1.9. During the initialization under the context manager, each time a parameter is created, it is instantly moved to that device.\n+\n+<Tip warning={true}>\n+\n+You can't move a model initialized like this to CPU or another device directly, since it doesn't have any data. It's also very likely that a forward pass with that empty model will fail, as not all operations are supported on the meta device.\n+\n+</Tip>\n+\n+## Sharded checkpoints\n+\n+It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, that is more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards.\n+\n+Accelerate will handle sharded checkpoints as long as you use the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. 
For instance we could have a folder containing:\n+\n+```bash\n+first_state_dict.bin\n+index.json\n+second_state_dict.bin\n+```\n+\n+with index.json being the following file:\n+\n+```\n+{\n+ \"linear1.weight\": \"first_state_dict.bin\",\n+ \"linear1.bias\": \"first_state_dict.bin\",\n+ \"linear2.weight\": \"second_state_dict.bin\",\n+ \"linear2.bias\": \"second_state_dict.bin\"\n+}\n+```\n+\n+and `first_state_dict.bin` containing the weights for `\"linear1.weight\"` and `\"linear1.bias\"`, `second_state_dict.bin` the ones for `\"linear2.weight\"` and `\"linear2.bias\"`\n+\n+## Loading weights\n+\n+The second tool Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.\n+\n+Here is how we can use this to load the [GPT-J-6B](https://huggingface.co/EleutherAI/gpt-j-6B) model. You clone the sharded version of this model with:\n+\n+```bash\n+git clone https://huggingface.co/sgugger/sharded-gpt-j-6B\n+cd sharded-gpt-j-6B\n+git-lfs install\n+git pull\n+```\n+\n+then we can initialize the model with\n+\n+```py\n+from accelerate import init_empty_weights\n+from transformers import AutoConfig, AutoModelForCausalLM\n+\n+checkpoint = \"EleutherAI/gpt-j-6B\"\n+config = AutoConfig.from_pretrained(checkpoint)\n+\n+with init_empty_weights():\n+ model = AutoModelForCausalLM.from_config(config)\n+```\n+\n+and load the checkpoint we just downloaded with:\n+\n+```py\n+from accelerate import load_checkpoint_and_dispatch\n+\n+model = load_checkpoint_and_dispatch(\n+ model, \"sharded-gpt-j-6B\", device_map=\"auto\", no_split_module_classes=[\"GPTJBlock\"]\n+)\n+```\n+\n+By passing `device_map=\"auto\"`, we tell Accelerate to determine automatically where to put each layer of the model depending on the available resources:\n+- first we use the maximum space available on the GPU(s)\n+- if we still need space, we store the remaining weights on the CPU\n+- if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors\n+\n+`no_split_module_classes=[\"GPTJBlock\"]` indicates that the modules that are `GPTJBlock` should not be split on different devices. 
You should set here all blocks that include a residual connection of some kind.\n+\n+You can see the `device_map` that Accelerate picked by accessing the `hf_device_map` attribute of your model:\n+\n+```py\n+model.hf_device_map\n+```\n+\n+```python out\n+{'transformer.wte': 0,\n+ 'transformer.drop': 0,\n+ 'transformer.h.0': 0,\n+ 'transformer.h.1': 0,\n+ 'transformer.h.2': 0,\n+ 'transformer.h.3': 0,\n+ 'transformer.h.4': 0,\n+ 'transformer.h.5': 0,\n+ 'transformer.h.6': 0,\n+ 'transformer.h.7': 0,\n+ 'transformer.h.8': 0,\n+ 'transformer.h.9': 0,\n+ 'transformer.h.10': 0,\n+ 'transformer.h.11': 0,\n+ 'transformer.h.12': 0,\n+ 'transformer.h.13': 0,\n+ 'transformer.h.14': 0,\n+ 'transformer.h.15': 0,\n+ 'transformer.h.16': 0,\n+ 'transformer.h.17': 0,\n+ 'transformer.h.18': 0,\n+ 'transformer.h.19': 0,\n+ 'transformer.h.20': 0,\n+ 'transformer.h.21': 0,\n+ 'transformer.h.22': 0,\n+ 'transformer.h.23': 0,\n+ 'transformer.h.24': 1,\n+ 'transformer.h.25': 1,\n+ 'transformer.h.26': 1,\n+ 'transformer.h.27': 1,\n+ 'transformer.ln_f': 1,\n+ 'lm_head': 1}\n+ ```\n+\n+You can also design your `device_map` yourself, if you prefer to explicitly decide where each layer should be. In this case, the command above becomes:\n+\n+```py\n+model = load_checkpoint_and_dispatch(model, \"sharded-gpt-j-6B\", device_map=my_device_map)\n+```\n+\n+## Run the model\n+\n+Now that we have done this, our model lies across several devices, and maybe the hard drive. But it can still be used as a regular PyTorch model:\n+\n+```py\n+from transformers import AutoTokenizer\n+\n+tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n+inputs = tokenizer(\"Hello, my name is\", return_tensors=\"pt\")\n+inputs = inputs.to(0)\n+output = model.generate(inputs[\"input_ids\"])\n+tokenizer.decode(output[0].tolist())\n+```\n+\n+Behind the scenes, Accelerate added hooks to the model, so that:\n+- at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works)\n+- for the weights offloaded on the CPU, they are put on a GPU just before the forward pass, and cleaned up just after\n+- for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass, and cleaned up just after\n+\n+This way, you model can run for inference even if it doesn't fit on one of the GPUs or the CPU RAM!\n+\n+<Tip warning={true}>\n+\n+This only supports inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending some GPU memory with intermediate activations.\n+\n+</Tip>\n+\n+## Limits and further development\n+\n+We are aware of the current limitations in the API:\n+\n+- While this could theoretically work just one CPU with potential disk offload, you need at least one GPU to run this API. This will be fixed in further development.\n+- [`infer_auto_device_map`] (or `device_map=\"auto\"` in [`load_checkpoint_and_dispatch`]) tries to maximize GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), it's not entirely true with Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. 
Move a few modules to the disk device if you get crashes due to lack of RAM.\n+- [`infer_auto_device_map`] (or `device_map=\"auto\"` in [`load_checkpoint_and_dispatch`]) attributes devices sequentially (to avoid moving things back and forth) so if your first layer is bigger than the size of the GPU you have, it will end up with everything on the CPU/Disk.\n+- [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.\n+- The model parallelism used when your model is split on several GPUs is naive and not optimized, meaning that only one GPU works at a given time and the other sits idle.\n+- When weights are offloaded on the CPU/hard drive, there is no pre-fetching (yet, we will work on this for future versions) which means the weights are put on the GPU when they are needed and not before.\n+- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).\n+\n+## API doc\n+\n+[[autodoc]] cpu_offload\n+\n+[[autodoc]] disk_offload\n+\n+[[autodoc]] dispatch_model\n+\n+[[autodoc]] infer_auto_device_map\n+\n+[[autodoc]] init_empty_weights\n+\n+[[autodoc]] load_checkpoint_and_dispatch\n+\n+[[autodoc]] load_checkpoint_in_model\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex b74c683b4..2f7ac4af2 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -5,6 +5,7 @@\n __version__ = \"0.8.0.dev0\"\n \n from .accelerator import Accelerator\n+from .big_modeling import cpu_offload, disk_offload, dispatch_model, init_empty_weights, load_checkpoint_and_dispatch\n from .launchers import debug_launcher, notebook_launcher\n from .utils import (\n DeepSpeedPlugin,\n@@ -13,5 +14,7 @@\n GradScalerKwargs,\n InitProcessGroupKwargs,\n find_executable_batch_size,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n synchronize_rng_states,\n )\ndiff --git a/src/accelerate/big_modeling.py b/src/accelerate/big_modeling.py\nnew file mode 100644\nindex 000000000..9db015427\n--- /dev/null\n+++ b/src/accelerate/big_modeling.py\n@@ -0,0 +1,285 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```python\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billion parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+\n+ <Tip warning={true}>\n+\n+ Any model created under this context manager has no weights. As such you can't do something like\n+ `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n+\n+ </Tip>\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. 
During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameters())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameters())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that\n+ `\"disk\"` is accepted even if it's not a proper value for `torch.device`.\n+ main_device (`str`, `int` or `torch.device`, *optional*):\n+ The main execution device. 
Will default to the first device in the `device_map` different from `\"cpu\"` or\n+ `\"disk\"`.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the part of the model that will be kept on CPU.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ # Error early if the device map is incomplete.\n+ check_device_map(model, device_map)\n+\n+ if main_device is None:\n+ main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\n+\n+ cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\n+ if state_dict is None and len(cpu_modules) > 0:\n+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\n+\n+ disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\n+ if offload_dir is None and len(disk_modules) > 0:\n+ raise ValueError(\n+ \"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules \"\n+ f\"need to be offloaded: {', '.join(disk_modules)}.\"\n+ )\n+ if len(disk_modules) > 0 and (\n+ not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\"))\n+ ):\n+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)\n+ offload_state_dict(offload_dir, disk_state_dict)\n+\n+ execution_device = {\n+ name: main_device if device in [\"cpu\", \"disk\"] else device for name, device in device_map.items()\n+ }\n+ offload = {name: device in [\"cpu\", \"disk\"] for name, device in device_map.items()}\n+ save_folder = offload_dir if len(disk_modules) > 0 else None\n+ if state_dict is not None or save_folder is not None:\n+ weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=save_folder)\n+ else:\n+ weights_map = None\n+\n+ attach_align_device_hook_on_blocks(\n+ model,\n+ execution_device=execution_device,\n+ offload=offload,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ model.hf_device_map = device_map\n+ return model\n+\n+\n+def load_checkpoint_and_dispatch(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+ offload_buffers: bool = False,\n+ dtype: Optional[Union[str, torch.dtype]] = None,\n+ offload_state_dict: bool = False,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded and adds the various hooks that will make this model run properly (even if split across devices).\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. 
It doesn't need to be refined to each parameter/buffer\n+ name, once a given module name is inside, every submodule of it will be sent to the same device.\n+\n+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map=\"auto\"`.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU\n+ and the available CPU RAM if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as\n+ well as the parameters.\n+ dtype (`str` or `torch.dtype`, *optional*):\n+ If provided, the weights will be converted to that type when loaded.\n+ offload_state_dict (`bool`, *optional*, defaults to `False`):\n+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoig getting out of CPU RAM if\n+ the weight of the CPU state dict + the biggest shard does not fit.\n+ \"\"\"\n+ if device_map == \"auto\":\n+ device_map = infer_auto_device_map(\n+ model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype\n+ )\n+ load_checkpoint_in_model(\n+ model,\n+ checkpoint,\n+ device_map=device_map,\n+ offload_folder=offload_folder,\n+ dtype=dtype,\n+ offload_state_dict=offload_state_dict,\n+ )\n+ if device_map is None:\n+ return model\n+ return dispatch_model(model, device_map=device_map, offload_dir=offload_folder, offload_buffers=offload_buffers)\ndiff --git a/src/accelerate/hooks.py b/src/accelerate/hooks.py\nnew file mode 100644\nindex 000000000..0a5bca96d\n--- /dev/null\n+++ b/src/accelerate/hooks.py\n@@ -0,0 +1,411 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import functools\n+from typing import Dict, Mapping, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .utils import PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device\n+\n+\n+class ModelHook:\n+ \"\"\"\n+ A hook that contains callbacks to be executed just before and after the forward method of a model. 
The difference\n+ with PyTorch's existing hooks is that they get passed along the kwargs.\n+\n+ Class attribute:\n+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under\n+ the `torch.no_grad()` context manager.\n+ \"\"\"\n+\n+ no_grad = False\n+\n+ def init_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is attached to the module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module attached to this hook.\n+ \"\"\"\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ \"\"\"\n+ To be executed just before the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.\n+ args (`Tuple[Any]`): The positional arguments passed to the module.\n+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.\n+\n+ Returns:\n+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.\n+ \"\"\"\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ \"\"\"\n+ To be executed just after the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass has been executed just before this event.\n+ output (`Any`): The output of the module.\n+\n+ Returns:\n+ `Any`: The processed `output`.\n+ \"\"\"\n+ return output\n+\n+ def detach_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is detached from a module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module detached from this hook.\n+ \"\"\"\n+ return module\n+\n+\n+class SequentialHook(ModelHook):\n+ \"\"\"\n+ A hook that can contain several hooks and iterates through them at each event.\n+ \"\"\"\n+\n+ def __init__(self, *hooks):\n+ self.hooks = hooks\n+\n+ def init_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.init_hook(module)\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ for hook in self.hooks:\n+ args, kwargs = hook.pre_forward(module, *args, **kwargs)\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ for hook in self.hooks:\n+ output = hook.post_forward(module, output)\n+ return output\n+\n+ def detach_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.detach_hook(module)\n+ return module\n+\n+\n+def add_hook_to_module(module: nn.Module, hook: ModelHook):\n+ \"\"\"\n+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook; to remove\n+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.\n+\n+ <Tip warning={true}>\n+\n+ If the module already contains a hook, this will replace it with the new hook passed. 
To chain two hooks together,\n+ use the `SequentialHook` class.\n+\n+ </Tip>\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to attach a hook to.\n+ hook (`ModelHook`): The hook to attach.\n+\n+ Returns:\n+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n+ be discarded).\n+ \"\"\"\n+ if hasattr(module, \"_hf_hook\") and hasattr(module, \"_old_forward\"):\n+ # If we already put some hook on this module, we replace it with the new one.\n+ old_forward = module._old_forward\n+ else:\n+ old_forward = module.forward\n+ module._old_forward = old_forward\n+\n+ module = hook.init_hook(module)\n+ module._hf_hook = hook\n+\n+ @functools.wraps(old_forward)\n+ def new_forward(*args, **kwargs):\n+ args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)\n+ if module._hf_hook.no_grad:\n+ with torch.no_grad():\n+ output = old_forward(*args, **kwargs)\n+ else:\n+ output = old_forward(*args, **kwargs)\n+ return module._hf_hook.post_forward(module, output)\n+\n+ module.forward = new_forward\n+ return module\n+\n+\n+def remove_hook_from_module(module: nn.Module):\n+ \"\"\"\n+ Removes any hook attached to a module via `add_hook_to_module`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to remove the hook from.\n+\n+ Returns:\n+ `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can\n+ be discarded).\n+ \"\"\"\n+ if hasattr(module, \"_hf_hook\"):\n+ module._hf_hook.detach_hook(module)\n+ delattr(module, \"_hf_hook\")\n+\n+ if hasattr(module, \"_old_forward\"):\n+ module.forward = module._old_forward\n+ delattr(module, \"_old_forward\")\n+\n+ return module\n+\n+\n+class AlignDevicesHook(ModelHook):\n+ \"\"\"\n+ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the\n+ associated module, potentially offloading the weights after the forward pass.\n+\n+ Args:\n+ execution_device (`torch.device`, *optional*):\n+ The device on which inputs and model weights should be placed before the forward pass.\n+ offload (`bool`, *optional*, defaults to `False`):\n+ Whether or not the weights should be offloaded after the forward pass.\n+ io_same_device (`bool`, *optional*, defaults to `False`):\n+ Whether or not the output should be placed on the same device as the input was.\n+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to include the associated module's buffers when offloading.\n+ place_submodules (`bool`, *optional*, defaults to `False`):\n+ Whether to place the submodules on `execution_device` during the `init_hook` event.\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ execution_device: Optional[Union[int, str, torch.device]] = None,\n+ offload: bool = False,\n+ io_same_device: bool = False,\n+ weights_map: Optional[Mapping] = None,\n+ offload_buffers: bool = False,\n+ place_submodules: bool = False,\n+ ):\n+ self.execution_device = execution_device\n+ self.offload = offload\n+ self.io_same_device = io_same_device\n+ self.weights_map = weights_map\n+ self.offload_buffers = offload_buffers\n+ self.place_submodules = place_submodules\n+\n+ # Will contain the input device when `io_same_device=True`.\n+ self.input_device = None\n+ self.param_original_devices = {}\n+ self.buffer_original_devices = {}\n+\n+ def init_hook(self, module):\n+ if not 
self.offload and self.execution_device is not None:\n+ for name, _ in named_module_tensors(module, recurse=self.place_submodules):\n+ set_module_tensor_to_device(module, name, self.execution_device)\n+ elif self.offload:\n+ self.original_devices = {name: param.device for name, param in named_module_tensors(module)}\n+ if self.weights_map is None:\n+ self.weights_map = {\n+ name: param.to(\"cpu\")\n+ for name, param in named_module_tensors(module, include_buffers=self.offload_buffers)\n+ }\n+\n+ for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ set_module_tensor_to_device(module, name, \"meta\")\n+ if not self.offload_buffers and self.execution_device is not None:\n+ for name, _ in module.named_buffers(recurse=False):\n+ set_module_tensor_to_device(module, name, self.execution_device)\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ if self.io_same_device:\n+ self.input_device = find_device([args, kwargs])\n+ if self.offload:\n+ for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ set_module_tensor_to_device(module, name, self.execution_device, value=self.weights_map[name])\n+\n+ return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)\n+\n+ def post_forward(self, module, output):\n+ if self.offload:\n+ for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ set_module_tensor_to_device(module, name, \"meta\")\n+\n+ if self.io_same_device and self.input_device is not None:\n+ output = send_to_device(output, self.input_device)\n+\n+ return output\n+\n+ def detach_hook(self, module):\n+ if self.offload:\n+ for name, device in self.original_devices.items():\n+ if device != torch.device(\"meta\"):\n+ set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))\n+\n+\n+def attach_align_device_hook(\n+ module: torch.nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload: bool = False,\n+ weights_map: Optional[Mapping] = None,\n+ offload_buffers: bool = False,\n+ module_name: str = \"\",\n+):\n+ \"\"\"\n+ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or\n+ buffers.\n+\n+ Args:\n+ module (`torch.nn.Module`):\n+ The module where we want to attach the hooks.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which inputs and model weights should be placed before the forward pass.\n+ offload (`bool`, *optional*, defaults to `False`):\n+ Whether or not the weights should be offloaded after the forward pass.\n+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to include the associated module's buffers when offloading.\n+ module_name (`str`, *optional*, defaults to `\"\"`):\n+ The name of the module.\n+ \"\"\"\n+ # Attach the hook on this module if it has any direct tensor.\n+ directs = named_module_tensors(module)\n+ if len(list(directs)) > 0:\n+ if weights_map is not None:\n+ prefix = f\"{module_name}.\" if len(module_name) > 0 else \"\"\n+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)\n+ else:\n+ prefixed_weights_map = None\n+ hook = AlignDevicesHook(\n+ execution_device=execution_device,\n+ offload=offload,\n+ weights_map=prefixed_weights_map,\n+ offload_buffers=offload_buffers,\n+ )\n+ add_hook_to_module(module, 
hook)\n+\n+ # Recurse on all children of the module.\n+ for child_name, child in module.named_children():\n+ child_name = f\"{module_name}.{child_name}\" if len(module_name) > 0 else child_name\n+ attach_align_device_hook(\n+ child,\n+ execution_device=execution_device,\n+ offload=offload,\n+ weights_map=weights_map,\n+ offload_buffers=offload_buffers,\n+ module_name=child_name,\n+ )\n+\n+\n+def remove_hook_from_submodules(module: nn.Module):\n+ \"\"\"\n+ Recursively removes all hooks attached on the submodules of a given model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module on which to remove all hooks.\n+ \"\"\"\n+ remove_hook_from_module(module)\n+ for child in module.children():\n+ remove_hook_from_submodules(child)\n+\n+\n+def attach_align_device_hook_on_blocks(\n+ module: nn.Module,\n+ execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,\n+ offload: Union[bool, Dict[str, bool]] = False,\n+ weights_map: Mapping = None,\n+ offload_buffers: bool = False,\n+ module_name: str = \"\",\n+):\n+ \"\"\"\n+ Attaches `AlignDevicesHook` to all blocks of a given model as needed.\n+\n+ Args:\n+ module (`torch.nn.Module`):\n+ The module where we want to attach the hooks.\n+ execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):\n+ The device on which inputs and model weights should be placed before the forward pass. It can be one device\n+ for the whole module, or a dictionary mapping module name to device.\n+ offload (`bool`, *optional*, defaults to `False`):\n+ Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole\n+ module, or a dictionary mapping module name to boolean.\n+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to include the associated module's buffers when offloading.\n+ module_name (`str`, *optional*, defaults to `\"\"`):\n+ The name of the module.\n+ \"\"\"\n+ # If one device and one offload, we've got one hook.\n+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):\n+ if not offload:\n+ hook = AlignDevicesHook(execution_device=execution_device, io_same_device=True, place_submodules=True)\n+ add_hook_to_module(module, hook)\n+ else:\n+ attach_align_device_hook(\n+ module,\n+ execution_device=execution_device,\n+ offload=True,\n+ weights_map=weights_map,\n+ offload_buffers=offload_buffers,\n+ module_name=module_name,\n+ )\n+ return\n+\n+ if not isinstance(execution_device, Mapping):\n+ execution_device = {key: execution_device for key in offload.keys()}\n+ if not isinstance(offload, Mapping):\n+ offload = {key: offload for key in execution_device.keys()}\n+\n+ if module_name in execution_device and not offload[module_name]:\n+ hook = AlignDevicesHook(\n+ execution_device=execution_device[module_name],\n+ offload_buffers=offload_buffers,\n+ io_same_device=(module_name == \"\"),\n+ place_submodules=True,\n+ )\n+ add_hook_to_module(module, hook)\n+ elif module_name in execution_device:\n+ attach_align_device_hook(\n+ module,\n+ execution_device=execution_device[module_name],\n+ offload=True,\n+ weights_map=weights_map,\n+ offload_buffers=offload_buffers,\n+ module_name=module_name,\n+ )\n+ if not hasattr(module, \"_hf_hook\"):\n+ hook = AlignDevicesHook(execution_device=execution_device[module_name], io_same_device=(module_name == \"\"))\n+ add_hook_to_module(module, 
hook)\n+ elif module_name == \"\":\n+ hook = AlignDevicesHook(io_same_device=True)\n+ add_hook_to_module(module, hook)\n+\n+ for child_name, child in module.named_children():\n+ child_name = f\"{module_name}.{child_name}\" if len(module_name) > 0 else child_name\n+ attach_align_device_hook_on_blocks(\n+ child,\n+ execution_device=execution_device,\n+ offload=offload,\n+ weights_map=weights_map,\n+ offload_buffers=offload_buffers,\n+ module_name=child_name,\n+ )\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex 03aef45b3..46723ebfa 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -2,5 +2,5 @@\n # There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n # module, but to preserve other warnings. So, don't check this module at all.\n \n-from .testing import are_the_same_tensors, execute_subprocess_async, require_cuda, require_multi_gpu, require_tpu\n+from .testing import are_the_same_tensors, execute_subprocess_async, require_cuda, require_multi_gpu, require_tpu, slow\n from .training import RegressionDataset, RegressionModel\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nindex a43c7d261..58b33c1ad 100644\n--- a/src/accelerate/utils/__init__.py\n+++ b/src/accelerate/utils/__init__.py\n@@ -30,6 +30,20 @@\n is_tpu_available,\n is_wandb_available,\n )\n+from .modeling import (\n+ check_device_map,\n+ compute_module_sizes,\n+ convert_file_size_to_int,\n+ dtype_byte_size,\n+ find_tied_parameters,\n+ get_max_layer_size,\n+ get_max_memory,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ named_module_tensors,\n+ set_module_tensor_to_device,\n+)\n+from .offload import OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, offload_state_dict\n from .operations import (\n broadcast,\n broadcast_object_list,\n@@ -37,6 +51,7 @@\n convert_outputs_to_fp32,\n convert_to_fp32,\n find_batch_size,\n+ find_device,\n gather,\n gather_object,\n get_data_structure,\ndiff --git a/src/accelerate/utils/modeling.py b/src/accelerate/utils/modeling.py\nnew file mode 100644\nindex 000000000..08c6f269d\n--- /dev/null\n+++ b/src/accelerate/utils/modeling.py\n@@ -0,0 +1,624 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+import shutil\n+import tempfile\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. 
Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ tensor_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device not in [\"meta\", torch.device(\"meta\")] and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put it on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors 
(parameters + buffers) of a given module. If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors of.\n+ include_buffers (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional*, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = model.linear1.weight\n+ >>> find_tied_parameters(model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. 
So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module, dtype: Optional[Union[str, torch.device]] = None):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ if isinstance(dtype, str):\n+ # We accept \"torch.float16\" or just \"float16\"\n+ dtype = dtype.replace(\"torch.\", \"\")\n+ dtype = getattr(torch, dtype)\n+ if dtype is not None:\n+ dtype_size = dtype_byte_size(dtype)\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ if dtype is None:\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ else:\n+ size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype))\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ prefix = \"\" if module_name == \"\" else f\"{module_name}.\"\n+ values = [v for k, v in device_map.items() if k.startswith(prefix)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(prefix)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+ dtype: Optional[Union[str, torch.dtype]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and 
finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available on any of the GPUs.\n+ - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed, we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done by analyzing the sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across devices (for instance any layer that has a\n+ residual connection).\n+ dtype (`str` or `torch.dtype`, *optional*):\n+ If provided, the weights will be converted to that type when loaded.\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys())\n+ gpus = [device for device in devices if device != \"cpu\"]\n+ if \"disk\" not in devices:\n+ devices.append(\"disk\")\n+\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [gpus[0], \"cpu\"] if len(gpus) > 0 else [\"cpu\"]\n+\n+ module_sizes = compute_module_sizes(model, dtype=dtype)\n+ tied_parameters = find_tied_parameters(model)\n+\n+ device_map = {}\n+ current_device = 0\n+ current_memory_used = 0\n+\n+ # Direct submodules and parameters\n+ modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ # Initialize the largest layer size, to know how much space to keep in memory\n+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n+\n+ # Ready? 
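We greedily assign each module to the current device if it fits (always keeping room on the\n+ # main devices for the largest layer), and otherwise either split it into its children or move on to the next device.\n+ # 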
This is going to be a bit messy.\n+ while len(modules_to_treat) > 0:\n+ name, module = modules_to_treat.pop(0)\n+ # Max size in the remaining layers may have changed since we took one, so we maybe update it.\n+ max_layer_names = [n for n in max_layer_names if not n.startswith(name)]\n+ if len(max_layer_names) == 0:\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ # Assess size needed\n+ module_size = module_sizes[name]\n+ tied_params = [v for k, v in tied_parameters.items() if name in k]\n+ # We ignore parameters that are tied when they're tied to > 1 one\n+ tied_param = tied_params[0] if len(tied_params) == 1 else None\n+\n+ device = devices[current_device]\n+ current_max_size = max_memory[device] if device != \"disk\" else None\n+ # Reduce max size available by the largest layer.\n+ if devices[current_device] in main_devices:\n+ current_max_size = current_max_size - max_layer_size\n+ # Case 1 -> We're too big!\n+ if current_max_size is not None and current_memory_used + module_size > current_max_size:\n+ # Split or not split?\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # -> no split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # -> split, we replace the module studied by its children + parameters\n+ modules_children = list(module.named_parameters(recurse=False)) + modules_children\n+ modules_to_treat = [(f\"{name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+\n+ # Case 2, it fits! 
We're not entirely out of the woods though, because we may have some tied parameters.\n+ elif tied_param is not None:\n+ # Determine the size occupied by this module + the module containing the tied parameter\n+ tied_module_size = module_size\n+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]\n+ tied_module_name, tied_module = modules_to_treat[tied_module_index]\n+ tied_module_size += module_sizes[tied_module_name] - module_sizes[tied_param]\n+ if current_max_size is not None and current_memory_used + tied_module_size > current_max_size:\n+ # Split or not split?\n+ tied_module_children = list(tied_module.named_children())\n+ if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:\n+ # If the tied module is not split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # Otherwise, we replace the tied module by its children.\n+ tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children\n+ tied_module_children = [(f\"{tied_module_name}.{n}\", v) for n, v in tied_module_children]\n+ modules_to_treat = (\n+ [(name, module)]\n+ + modules_to_treat[:tied_module_index]\n+ + tied_module_children\n+ + modules_to_treat[tied_module_index + 1 :]\n+ )\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ else:\n+ # We really really fit!\n+ current_memory_used += tied_module_size\n+ device_map[name] = devices[current_device]\n+ modules_to_treat.pop(tied_module_index)\n+ device_map[tied_module_name] = devices[current_device]\n+ else:\n+ current_memory_used += module_size\n+ device_map[name] = devices[current_device]\n+\n+ return clean_device_map(device_map)\n+\n+\n+def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):\n+ \"\"\"\n+ Checks that a device map covers everything in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to check the device map against.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.\n+ \"\"\"\n+ all_model_tensors = [name for name, _ in model.state_dict().items()]\n+ for module_name in device_map.keys():\n+ all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)]\n+ if len(all_model_tensors) > 0:\n+ non_covered_params = \", \".join(all_model_tensors)\n+ raise ValueError(\n+ f\"The device_map provided does not give any device for the following parameters: {non_covered_params}\"\n+ )\n+\n+\n+def load_checkpoint_in_model(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+ dtype: Optional[Union[str, torch.dtype]] = None,\n+ offload_state_dict: bool = False,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded.\n+\n+ <Tip warning={true}>\n+\n+ Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. 
To\n+ group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer\n+ name: once a given module name is inside, every submodule of it will be sent to the same device.\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ dtype (`str` or `torch.dtype`, *optional*):\n+ If provided, the weights will be converted to that type when loaded.\n+ offload_state_dict (`bool`, *optional*, defaults to `False`):\n+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if\n+ the weight of the CPU state dict + the biggest shard does not fit.\n+ \"\"\"\n+ if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n+ raise ValueError(\n+ \"At least one of the model submodules will be offloaded to disk, please pass along an `offload_folder`.\"\n+ )\n+ elif offload_folder is not None and device_map is not None and \"disk\" in device_map.values():\n+ os.makedirs(offload_folder, exist_ok=True)\n+\n+ if isinstance(dtype, str):\n+ # We accept \"torch.float16\" or just \"float16\"\n+ dtype = dtype.replace(\"torch.\", \"\")\n+ dtype = getattr(torch, dtype)\n+\n+ checkpoint_files = None\n+ index_filename = None\n+ if os.path.isfile(checkpoint):\n+ if str(checkpoint).endswith(\".json\"):\n+ index_filename = checkpoint\n+ else:\n+ checkpoint_files = [checkpoint]\n+ elif os.path.isdir(checkpoint):\n+ potential_index = [f for f in os.listdir(checkpoint) if f.endswith(\".index.json\")]\n+ if len(potential_index) == 0:\n+ raise ValueError(f\"{checkpoint} is not a folder containing a `.index.json` file.\")\n+ elif len(potential_index) == 1:\n+ index_filename = os.path.join(checkpoint, potential_index[0])\n+ else:\n+ raise ValueError(f\"{checkpoint} containing mote than one `.index.json` file, delete the irrelevant ones.\")\n+ else:\n+ raise ValueError(\n+ \"`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded \"\n+ f\"checkpoint, or a folder containing a sharded checkpoint, but got {checkpoint}.\"\n+ )\n+\n+ if index_filename is not None:\n+ checkpoint_folder = os.path.split(index_filename)[0]\n+ with open(index_filename, \"r\") as f:\n+ index = json.loads(f.read())\n+\n+ if \"weight_map\" in index:\n+ index = index[\"weight_map\"]\n+ checkpoint_files = sorted(list(set(index.values())))\n+ checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]\n+\n+ # Logic for missing/unexpected keys goes here.\n+\n+ offload_index = {}\n+ if offload_state_dict:\n+ state_dict_folder = tempfile.mkdtemp()\n+ state_dict_index = {}\n+\n+ for checkpoint_file in checkpoint_files:\n+ checkpoint = torch.load(checkpoint_file)\n+ if device_map is None:\n+ model.load_state_dict(checkpoint, strict=False)\n+ else:\n+ for param_name, param in checkpoint.items():\n+ 
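# For each tensor in the shard, find the device_map entry that covers it by walking up its module name.\n+ 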
module_name = param_name\n+ if dtype is not None:\n+ param = param.to(dtype)\n+ while len(module_name) > 0 and module_name not in device_map:\n+ module_name = \".\".join(module_name.split(\".\")[:-1])\n+ if module_name == \"\" and \"\" not in device_map:\n+ # TODO: group all errors and raise at the end.\n+ raise ValueError(f\"{param_name} doesn't have any device set.\")\n+ param_device = device_map[module_name]\n+\n+ if param_device == \"disk\":\n+ set_module_tensor_to_device(model, param_name, \"meta\")\n+ tensor_file = os.path.join(offload_folder, f\"{param_name}.dat\")\n+ array = param.numpy()\n+ offload_index[param_name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n+ file_array[:] = array[:]\n+ file_array.flush()\n+ elif param_device == \"cpu\" and offload_state_dict:\n+ set_module_tensor_to_device(model, param_name, \"meta\")\n+ tensor_file = os.path.join(state_dict_folder, f\"{param_name}.dat\")\n+ array = param.numpy()\n+ state_dict_index[param_name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n+ file_array[:] = array[:]\n+ file_array.flush()\n+ else:\n+ set_module_tensor_to_device(model, param_name, param_device, value=param)\n+\n+ # Force Python to clean up.\n+ del checkpoint\n+ gc.collect()\n+\n+ if len(offload_index) > 0:\n+ offload_index_file = os.path.join(offload_folder, \"index.json\")\n+ if os.path.isfile(offload_index_file):\n+ with open(offload_index_file, \"r\", encoding=\"utf-8\") as f:\n+ current_offload_index = json.load(f)\n+ else:\n+ current_offload_index = {}\n+ current_offload_index.update(offload_index)\n+\n+ with open(offload_index_file, \"w\", encoding=\"utf-8\") as f:\n+ json.dump(current_offload_index, f, indent=2)\n+\n+ # Load back offloaded state dict on CPU\n+ if offload_state_dict and len(state_dict_index) > 0:\n+ for param_name, metadata in state_dict_index.items():\n+ tensor_file = os.path.join(state_dict_folder, f\"{param_name}.dat\")\n+ shape = tuple(metadata[\"shape\"])\n+ weight = np.memmap(tensor_file, dtype=metadata[\"dtype\"], mode=\"r\", shape=shape)\n+ set_module_tensor_to_device(model, param_name, \"cpu\", value=torch.tensor(weight))\n+ shutil.rmtree(state_dict_folder)\ndiff --git a/src/accelerate/utils/offload.py b/src/accelerate/utils/offload.py\nnew file mode 100644\nindex 000000000..17f33d7c6\n--- /dev/null\n+++ b/src/accelerate/utils/offload.py\n@@ -0,0 +1,143 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import json\n+import os\n+from collections.abc import Mapping\n+from typing import Dict, List, Optional, Union\n+\n+import numpy as np\n+import torch\n+\n+\n+def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):\n+ \"\"\"\n+ Offload a state dict in a given folder.\n+\n+ Args:\n+ save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict.\n+ state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload.\n+ \"\"\"\n+ os.makedirs(save_dir, exist_ok=True)\n+ index = {}\n+ for name, parameter in state_dict.items():\n+ tensor_file = os.path.join(save_dir, f\"{name}.dat\")\n+ array = parameter.numpy()\n+ index[name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n+ if array.ndim == 0:\n+ array = array[None]\n+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n+ file_array[:] = array[:]\n+ file_array.flush()\n+\n+ # Update index\n+ index_file = os.path.join(save_dir, \"index.json\")\n+ if os.path.isfile(index_file):\n+ with open(index_file, \"r\", encoding=\"utf-8\") as f:\n+ current_index = json.load(f)\n+ else:\n+ current_index = {}\n+ current_index.update(index)\n+\n+ with open(index_file, \"w\", encoding=\"utf-8\") as f:\n+ json.dump(current_index, f, indent=2)\n+\n+\n+class PrefixedDataset(Mapping):\n+ \"\"\"\n+ Will access keys in a given dataset by adding a prefix.\n+\n+ Args:\n+ dataset (`Mapping`): Any map with string keys.\n+ prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.\n+ \"\"\"\n+\n+ def __init__(self, dataset: Mapping, prefix: str):\n+ self.dataset = dataset\n+ self.prefix = prefix\n+\n+ def __getitem__(self, key):\n+ return self.dataset[f\"{self.prefix}{key}\"]\n+\n+ def __iter__(self):\n+ return iter([key for key in self.dataset if key.startswith(self.prefix)])\n+\n+ def __len__(self):\n+ return len(self.dataset)\n+\n+\n+class OffloadedWeightsLoader(Mapping):\n+ \"\"\"\n+ A collection that loads weights stored in a given state dict or memory-mapped on disk.\n+\n+ Args:\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ A dictionary parameter name to tensor.\n+ save_folder (`str` or `os.PathLike`, *optional*):\n+ The directory in which the weights are stored (by `offload_state_dict` for instance).\n+ index (`Dict`, *optional*):\n+ A dictionary from weight name to their information (`dtype` and `shape`). 
Will default to the index saved\n+ in `save_folder`.\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ state_dict: Dict[str, torch.Tensor] = None,\n+ save_folder: Optional[Union[str, os.PathLike]] = None,\n+ index: Mapping = None,\n+ ):\n+ if state_dict is None and save_folder is None:\n+ raise ValueError(\"Need either a `state_dict` or a `save_folder` containing offloaded weights.\")\n+\n+ self.state_dict = {} if state_dict is None else state_dict\n+ self.save_folder = save_folder\n+ if index is None and save_folder is not None:\n+ with open(os.path.join(save_folder, \"index.json\")) as f:\n+ index = json.load(f)\n+ self.index = {} if index is None else index\n+ self.all_keys = list(self.state_dict.keys())\n+ self.all_keys.extend([key for key in self.index if key not in self.all_keys])\n+\n+ def __getitem__(self, key: str):\n+ # State dict gets priority\n+ if key in self.state_dict:\n+ return self.state_dict[key]\n+ weight_info = self.index[key]\n+ weight_file = os.path.join(self.save_folder, f\"{key}.dat\")\n+ shape = tuple(weight_info[\"shape\"])\n+ if shape == ():\n+ weight = np.memmap(weight_file, dtype=weight_info[\"dtype\"], shape=(1,), mode=\"r\")[0]\n+ else:\n+ weight = np.memmap(weight_file, dtype=weight_info[\"dtype\"], shape=shape, mode=\"r\")\n+ return torch.tensor(weight)\n+\n+ def __iter__(self):\n+ return iter(self.all_keys)\n+\n+ def __len__(self):\n+ return len(self.all_keys)\n+\n+\n+def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]):\n+ \"\"\"\n+ Extract the sub state-dict corresponding to a list of given submodules.\n+\n+ Args:\n+ state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from.\n+ submodule_names (`List[str]`): The list of submodule names we want to extract.\n+ \"\"\"\n+ result = {}\n+ for module_name in submodule_names:\n+ result.update({key: param for key, param in state_dict.items() if key.startswith(module_name)})\n+ return result\ndiff --git a/src/accelerate/utils/operations.py b/src/accelerate/utils/operations.py\nindex b61ae069b..3fc9d969c 100644\n--- a/src/accelerate/utils/operations.py\n+++ b/src/accelerate/utils/operations.py\n@@ -490,3 +490,24 @@ def __call__(self, *args, **kwargs):\n \n \n convert_outputs_to_fp32 = ConvertOutputsToFp32\n+\n+\n+def find_device(data):\n+ \"\"\"\n+ Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).\n+\n+ Args:\n+ (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.\n+ \"\"\"\n+ if isinstance(data, Mapping):\n+ for obj in data.values():\n+ device = find_device(obj)\n+ if device is not None:\n+ return device\n+ elif isinstance(data, (tuple, list)):\n+ for obj in data:\n+ device = find_device(obj)\n+ if device is not None:\n+ return device\n+ elif isinstance(data, torch.Tensor):\n+ return data.device\ndiff --git a/tests/test_big_modeling.py b/tests/test_big_modeling.py\nnew file mode 100644\nindex 000000000..be2238794\n--- /dev/null\n+++ b/tests/test_big_modeling.py\n@@ -0,0 +1,276 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import unittest\n+from tempfile import TemporaryDirectory\n+\n+import torch\n+import torch.nn as nn\n+\n+from accelerate.big_modeling import (\n+ cpu_offload,\n+ disk_offload,\n+ dispatch_model,\n+ init_empty_weights,\n+ load_checkpoint_and_dispatch,\n+)\n+from accelerate.hooks import remove_hook_from_submodules\n+from accelerate.test_utils import require_cuda, require_multi_gpu, slow\n+from accelerate.utils import offload_state_dict\n+from transformers import AutoModelForCausalLM, AutoTokenizer\n+\n+\n+class ModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.batchnorm = nn.BatchNorm1d(4)\n+ self.linear2 = nn.Linear(4, 5)\n+\n+ def forward(self, x):\n+ return self.linear2(self.batchnorm(self.linear1(x)))\n+\n+\n+class BiggerModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.linear2 = nn.Linear(4, 5)\n+ self.batchnorm = nn.BatchNorm1d(5)\n+ self.linear3 = nn.Linear(5, 6)\n+ self.linear4 = nn.Linear(6, 5)\n+\n+ def forward(self, x):\n+ return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n+\n+\n+class BigModelingTester(unittest.TestCase):\n+ def test_init_empty_weights(self):\n+ # base use\n+ with init_empty_weights():\n+ module = nn.Linear(4, 5)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+\n+ # base use with buffers, they are not touched\n+ with init_empty_weights():\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ # Use with include_buffers=True\n+ with init_empty_weights(include_buffers=True):\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"meta\"))\n+\n+ # Double check we didn't break PyTorch\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ def test_init_empty_weights_very_large_model(self):\n+ # This is a 100 billion parameters model.\n+ with init_empty_weights():\n+ _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+\n+ def test_cpu_offload(self):\n+ model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ cpu_offload(model, execution_device=device)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ cpu_offload(model, execution_device=device, offload_buffers=True)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ @slow\n+ @require_cuda\n+ def test_cpu_offload_gpt2(self):\n+ tokenizer = 
AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ cpu_offload(gpt2, execution_device=0)\n+ outputs = gpt2.generate(inputs[\"input_ids\"])\n+ self.assertEqual(\n+ tokenizer.decode(outputs[0].tolist()),\n+ \"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n+ )\n+\n+ def test_disk_offload(self):\n+ model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(model, tmp_dir, execution_device=device)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ @slow\n+ @require_cuda\n+ def test_disk_offload_gpt2(self):\n+ tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(gpt2, tmp_dir, execution_device=0)\n+ outputs = gpt2.generate(inputs[\"input_ids\"])\n+ self.assertEqual(\n+ tokenizer.decode(outputs[0].tolist()),\n+ \"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n+ )\n+\n+ @require_cuda\n+ def test_dispatch_model(self):\n+ model = ModelForTest()\n+ device_map = {\"linear1\": \"disk\", \"batchnorm\": \"cpu\", \"linear2\": 0}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ dispatch_model(model, device_map, offload_dir=tmp_dir)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n+ @require_multi_gpu\n+ def test_dispatch_model_multi_gpu(self):\n+ model = BiggerModelForTest()\n+ device_map = {\"linear1\": \"cpu\", \"linear2\": \"disk\", \"batchnorm\": \"cpu\", \"linear3\": 0, \"linear4\": 1}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ dispatch_model(model, device_map, offload_dir=tmp_dir)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n+ @slow\n+ @require_multi_gpu\n+ def test_dispatch_model_gpt2_on_two_gpus(self):\n+ tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ # Dispatch on GPUs 0 and 1\n+ device_map = {\n+ \"transformer.wte\": 0,\n+ \"transformer.wpe\": 0,\n+ \"transformer.ln_f\": 1,\n+ \"lm_head\": 1,\n+ }\n+ for i in range(12):\n+ device_map[f\"transformer.h.{i}\"] = 0 if i <= 5 else 1\n+\n+ gpt2 = dispatch_model(gpt2, device_map)\n+ outputs = gpt2.generate(inputs[\"input_ids\"])\n+ self.assertEqual(\n+ tokenizer.decode(outputs[0].tolist()),\n+ \"Hello world! 
My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n+ )\n+\n+ # Dispatch with a bit of CPU offload\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ for i in range(4):\n+ device_map[f\"transformer.h.{i}\"] = \"cpu\"\n+ gpt2 = dispatch_model(gpt2, device_map)\n+ outputs = gpt2.generate(inputs[\"input_ids\"])\n+ self.assertEqual(\n+ tokenizer.decode(outputs[0].tolist()),\n+ \"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n+ )\n+ # Dispatch with a bit of CPU and disk offload\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ for i in range(2):\n+ device_map[f\"transformer.h.{i}\"] = \"disk\"\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ state_dict = {\n+ k: p for k, p in gpt2.state_dict().items() if \"transformer.h.0\" in k or \"transformer.h.1\" in k\n+ }\n+ offload_state_dict(tmp_dir, state_dict)\n+ gpt2 = dispatch_model(gpt2, device_map, offload_dir=tmp_dir)\n+ outputs = gpt2.generate(inputs[\"input_ids\"])\n+ self.assertEqual(\n+ tokenizer.decode(outputs[0].tolist()),\n+ \"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n+ )\n+\n+ @require_cuda\n+ def test_load_checkpoint_and_dispatch(self):\n+ model = ModelForTest()\n+ device_map = {\"linear1\": \"cpu\", \"batchnorm\": \"cpu\", \"linear2\": 0}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), checkpoint)\n+\n+ new_model = ModelForTest()\n+ new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map)\n+\n+ # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n+ self.assertEqual(new_model.linear1.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(new_model.linear2.weight.device, torch.device(0))\n+\n+ output = new_model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n+ @require_multi_gpu\n+ def test_load_checkpoint_and_dispatch_multi_gpu(self):\n+ model = BiggerModelForTest()\n+ device_map = {\"linear1\": \"cpu\", \"linear2\": \"cpu\", \"batchnorm\": 0, \"linear3\": 0, \"linear4\": 1}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), checkpoint)\n+\n+ new_model = BiggerModelForTest()\n+ new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map)\n+\n+ # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n+ self.assertEqual(new_model.linear1.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(new_model.linear2.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(new_model.linear3.weight.device, torch.device(0))\n+ self.assertEqual(new_model.linear4.weight.device, torch.device(1))\n+\n+ output = new_model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\ndiff --git a/tests/test_hooks.py b/tests/test_hooks.py\nnew file mode 100644\nindex 000000000..51c434ab1\n--- /dev/null\n+++ b/tests/test_hooks.py\n@@ -0,0 +1,330 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import inspect\n+import unittest\n+\n+import torch\n+import torch.nn as nn\n+\n+from accelerate.hooks import (\n+ AlignDevicesHook,\n+ ModelHook,\n+ SequentialHook,\n+ add_hook_to_module,\n+ attach_align_device_hook,\n+ remove_hook_from_module,\n+ remove_hook_from_submodules,\n+)\n+from accelerate.test_utils import require_multi_gpu\n+\n+\n+class ModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.batchnorm = nn.BatchNorm1d(4)\n+ self.linear2 = nn.Linear(4, 5)\n+\n+ def forward(self, x):\n+ return self.linear2(self.batchnorm(self.linear1(x)))\n+\n+\n+class PreForwardHook(ModelHook):\n+ def pre_forward(self, module, *args, **kwargs):\n+ return (args[0] + 1,) + args[1:], kwargs\n+\n+\n+class PostForwardHook(ModelHook):\n+ def post_forward(self, module, output):\n+ return output + 1\n+\n+\n+class HooksModelTester(unittest.TestCase):\n+ def test_add_and_remove_hooks(self):\n+ test_model = ModelForTest()\n+ test_hook = ModelHook()\n+\n+ add_hook_to_module(test_model, test_hook)\n+ self.assertEqual(test_model._hf_hook, test_hook)\n+ self.assertTrue(hasattr(test_model, \"_old_forward\"))\n+\n+ # Check adding the hook did not change the name or the signature\n+ self.assertEqual(test_model.forward.__name__, \"forward\")\n+ self.assertListEqual(list(inspect.signature(test_model.forward).parameters), [\"x\"])\n+\n+ remove_hook_from_module(test_model)\n+ self.assertFalse(hasattr(test_model, \"_hf_hook\"))\n+ self.assertFalse(hasattr(test_model, \"_old_forward\"))\n+\n+ def test_pre_forward_hook_is_executed(self):\n+ test_model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = test_model(x + 1)\n+ expected2 = test_model(x + 2)\n+\n+ test_hook = PreForwardHook()\n+ add_hook_to_module(test_model, test_hook)\n+ output1 = test_model(x)\n+ self.assertTrue(torch.allclose(output1, expected))\n+\n+ # Attaching a hook to a model when it already has one replaces, does not chain\n+ test_hook = PreForwardHook()\n+ add_hook_to_module(test_model, test_hook)\n+ output1 = test_model(x)\n+ self.assertTrue(torch.allclose(output1, expected))\n+\n+ # You need to use the sequential hook to chain two or more hooks\n+ test_hook = SequentialHook(PreForwardHook(), PreForwardHook())\n+ add_hook_to_module(test_model, test_hook)\n+\n+ output2 = test_model(x)\n+ assert torch.allclose(output2, expected2)\n+\n+ def test_post_forward_hook_is_executed(self):\n+ test_model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ output = test_model(x)\n+\n+ test_hook = PostForwardHook()\n+ add_hook_to_module(test_model, test_hook)\n+ output1 = test_model(x)\n+ self.assertTrue(torch.allclose(output1, output + 1))\n+\n+ # Attaching a hook to a model when it already has one replaces, does not chain\n+ test_hook = PostForwardHook()\n+ add_hook_to_module(test_model, test_hook)\n+ output1 = test_model(x)\n+ self.assertTrue(torch.allclose(output1, output + 1))\n+\n+ # You need to use the sequential hook 
to chain two or more hooks\n+ test_hook = SequentialHook(PostForwardHook(), PostForwardHook())\n+ add_hook_to_module(test_model, test_hook)\n+\n+ output2 = test_model(x)\n+ assert torch.allclose(output2, output + 2)\n+\n+ def test_no_grad_in_hook(self):\n+ test_model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ output = test_model(x)\n+\n+ test_hook = PostForwardHook()\n+ add_hook_to_module(test_model, test_hook)\n+ output1 = test_model(x)\n+ self.assertTrue(torch.allclose(output1, output + 1))\n+ self.assertTrue(output1.requires_grad)\n+\n+ test_hook.no_grad = True\n+ output1 = test_model(x)\n+ self.assertFalse(output1.requires_grad)\n+\n+ @require_multi_gpu\n+ def test_align_devices_as_model_parallelism(self):\n+ model = ModelForTest()\n+ # Everything is on CPU\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # This will move each submodule on different devices\n+ add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))\n+ add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))\n+ add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))\n+\n+ self.assertEqual(model.linear1.weight.device, torch.device(0))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(0))\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))\n+ self.assertEqual(model.linear2.weight.device, torch.device(1))\n+\n+ # We can still make a forward pass. The input does not need to be on any particular device\n+ x = torch.randn(2, 3)\n+ output = model(x)\n+ self.assertEqual(output.device, torch.device(1))\n+\n+ # We can add a general hook to put back output on same device as input.\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ x = torch.randn(2, 3).to(0)\n+ output = model(x)\n+ self.assertEqual(output.device, torch.device(0))\n+\n+ def test_align_devices_as_cpu_offload(self):\n+ model = ModelForTest()\n+\n+ # Everything is on CPU\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # This will move each submodule on different devices\n+ hook_kwargs = {\"execution_device\": 0 if torch.cuda.is_available() else \"cpu\", \"offload\": True}\n+\n+ add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))\n+ add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))\n+ add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))\n+\n+ # Parameters have been offloaded, so on the meta device\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"meta\"))\n+ # Buffers are not included in the offload by default, so are on the execution device\n+ device = torch.device(hook_kwargs[\"execution_device\"])\n+ self.assertEqual(model.batchnorm.running_mean.device, device)\n+\n+ x = torch.randn(2, 3)\n+ output = model(x)\n+ self.assertEqual(output.device, device)\n+\n+ # Removing hooks loads back the weights in the model.\n+ remove_hook_from_module(model.linear1)\n+ remove_hook_from_module(model.batchnorm)\n+ remove_hook_from_module(model.linear2)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ 
self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # Now test with buffers included in the offload\n+ hook_kwargs = {\n+ \"execution_device\": 0 if torch.cuda.is_available() else \"cpu\",\n+ \"offload\": True,\n+ \"offload_buffers\": True,\n+ }\n+\n+ add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))\n+ add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))\n+ add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))\n+\n+ # Parameters have been offloaded, so on the meta device, buffers included\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"meta\"))\n+\n+ x = torch.randn(2, 3)\n+ output = model(x)\n+ self.assertEqual(output.device, device)\n+\n+ # Removing hooks loads back the weights in the model.\n+ remove_hook_from_module(model.linear1)\n+ remove_hook_from_module(model.batchnorm)\n+ remove_hook_from_module(model.linear2)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ def test_attach_align_device_hook_as_cpu_offload(self):\n+ model = ModelForTest()\n+\n+ # Everything is on CPU\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # This will move each submodule on different devices\n+ execution_device = 0 if torch.cuda.is_available() else \"cpu\"\n+ attach_align_device_hook(model, execution_device=execution_device, offload=True)\n+\n+ # Parameters have been offloaded, so on the meta device\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"meta\"))\n+ # Buffers are not included in the offload by default, so are on the execution device\n+ device = torch.device(execution_device)\n+ self.assertEqual(model.batchnorm.running_mean.device, device)\n+\n+ x = torch.randn(2, 3)\n+ output = model(x)\n+ self.assertEqual(output.device, device)\n+\n+ # Removing hooks loads back the weights in the model.\n+ remove_hook_from_submodules(model)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # Now test with buffers included in the offload\n+ attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)\n+\n+ # Parameters have been offloaded, so on the meta device, buffers included\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"meta\"))\n+\n+ x = torch.randn(2, 3)\n+ output = model(x)\n+ self.assertEqual(output.device, device)\n+\n+ # Removing hooks loads back the weights in 
the model.\n+ remove_hook_from_submodules(model)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):\n+ model = ModelForTest()\n+\n+ # Everything is on CPU\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # This will move each submodule on different devices\n+ execution_device = 0 if torch.cuda.is_available() else \"cpu\"\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, weights_map=model.state_dict()\n+ )\n+\n+ # Parameters have been offloaded, so on the meta device\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"meta\"))\n+ # Buffers are not included in the offload by default, so are on the execution device\n+ device = torch.device(execution_device)\n+ self.assertEqual(model.batchnorm.running_mean.device, device)\n+\n+ x = torch.randn(2, 3)\n+ output = model(x)\n+ self.assertEqual(output.device, device)\n+\n+ # Removing hooks loads back the weights in the model.\n+ remove_hook_from_submodules(model)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # Now test with buffers included in the offload\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ weights_map=model.state_dict(),\n+ offload_buffers=True,\n+ )\n+\n+ # Parameters have been offloaded, so on the meta device, buffers included\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(\"meta\"))\n+\n+ x = torch.randn(2, 3)\n+ output = model(x)\n+ self.assertEqual(output.device, device)\n+\n+ # Removing hooks loads back the weights in the model.\n+ remove_hook_from_submodules(model)\n+ self.assertEqual(model.linear1.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\ndiff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py\nnew file mode 100644\nindex 000000000..15e164218\n--- /dev/null\n+++ b/tests/test_modeling_utils.py\n@@ -0,0 +1,360 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import json\n+import os\n+import tempfile\n+import unittest\n+\n+import torch\n+import torch.nn as nn\n+\n+from accelerate.test_utils import require_cuda, require_multi_gpu\n+from accelerate.utils.modeling import (\n+ check_device_map,\n+ clean_device_map,\n+ compute_module_sizes,\n+ find_tied_parameters,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ named_module_tensors,\n+ set_module_tensor_to_device,\n+)\n+\n+\n+class ModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.batchnorm = nn.BatchNorm1d(4)\n+ self.linear2 = nn.Linear(4, 5)\n+\n+ def forward(self, x):\n+ return self.linear2(self.batchnorm(self.linear1(x)))\n+\n+\n+class ModelingUtilsTester(unittest.TestCase):\n+ def check_set_module_tensor_for_device(self, model, device1, device2):\n+ self.assertEqual(model.linear1.weight.device, torch.device(device1))\n+\n+ with self.subTest(\"Access by submodule and direct name for a parameter\"):\n+ set_module_tensor_to_device(model.linear1, \"weight\", device2)\n+ self.assertEqual(model.linear1.weight.device, torch.device(device2))\n+\n+ if torch.device(device2) == torch.device(\"meta\"):\n+ with self.assertRaises(ValueError):\n+ # We need a `value` to set the weight back on device1\n+ set_module_tensor_to_device(model.linear1, \"weight\", device1)\n+\n+ set_module_tensor_to_device(model.linear1, \"weight\", device1, value=torch.randn(4, 3))\n+ else:\n+ set_module_tensor_to_device(model.linear1, \"weight\", device1)\n+ self.assertEqual(model.linear1.weight.device, torch.device(device1))\n+\n+ with self.subTest(\"Access by module and full name for a parameter\"):\n+ set_module_tensor_to_device(model, \"linear1.weight\", device2)\n+ self.assertEqual(model.linear1.weight.device, torch.device(device2))\n+\n+ if torch.device(device2) == torch.device(\"meta\"):\n+ with self.assertRaises(ValueError):\n+ # We need a `value` to set the weight back on device1\n+ set_module_tensor_to_device(model, \"linear1.weight\", device1)\n+ set_module_tensor_to_device(model, \"linear1.weight\", device1, value=torch.randn(4, 3))\n+ else:\n+ set_module_tensor_to_device(model, \"linear1.weight\", device1)\n+ self.assertEqual(model.linear1.weight.device, torch.device(device1))\n+\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1))\n+\n+ with self.subTest(\"Access by submodule and direct name for a buffer\"):\n+ set_module_tensor_to_device(model.batchnorm, \"running_mean\", device2)\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(device2))\n+\n+ if torch.device(device2) == torch.device(\"meta\"):\n+ with self.assertRaises(ValueError):\n+ # We need a `value` to set the weight back on device1\n+ set_module_tensor_to_device(model.batchnorm, \"running_mean\", device1)\n+ set_module_tensor_to_device(model.batchnorm, \"running_mean\", device1, value=torch.randn(4))\n+ else:\n+ set_module_tensor_to_device(model.batchnorm, 
\"running_mean\", device1)\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1))\n+\n+ with self.subTest(\"Access by module and full name for a parameter\"):\n+ set_module_tensor_to_device(model, \"batchnorm.running_mean\", device2)\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(device2))\n+\n+ if torch.device(device2) == torch.device(\"meta\"):\n+ with self.assertRaises(ValueError):\n+ # We need a `value` to set the weight back on CPU\n+ set_module_tensor_to_device(model, \"batchnorm.running_mean\", device1)\n+\n+ set_module_tensor_to_device(model, \"batchnorm.running_mean\", device1, value=torch.randn(4))\n+ else:\n+ set_module_tensor_to_device(model, \"batchnorm.running_mean\", device1)\n+ self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1))\n+\n+ def test_set_module_tensor_to_meta_and_cpu(self):\n+ model = ModelForTest()\n+ self.check_set_module_tensor_for_device(model, \"cpu\", \"meta\")\n+\n+ @require_cuda\n+ def test_set_module_tensor_to_cpu_and_gpu(self):\n+ model = ModelForTest()\n+ self.check_set_module_tensor_for_device(model, \"cpu\", 0)\n+\n+ @require_cuda\n+ def test_set_module_tensor_to_meta_and_gpu(self):\n+ model = ModelForTest().to(0)\n+ self.check_set_module_tensor_for_device(model, 0, \"meta\")\n+\n+ @require_multi_gpu\n+ def test_set_module_tensor_between_gpus(self):\n+ model = ModelForTest().to(0)\n+ self.check_set_module_tensor_for_device(model, 0, 1)\n+\n+ def test_named_tensors(self):\n+ model = nn.BatchNorm1d(4)\n+ named_tensors = named_module_tensors(model)\n+ self.assertListEqual(\n+ [name for name, _ in named_tensors],\n+ [\"weight\", \"bias\", \"running_mean\", \"running_var\", \"num_batches_tracked\"],\n+ )\n+\n+ named_tensors = named_module_tensors(model, include_buffers=False)\n+ self.assertListEqual([name for name, _ in named_tensors], [\"weight\", \"bias\"])\n+\n+ model = ModelForTest()\n+ named_tensors = named_module_tensors(model)\n+ self.assertListEqual([name for name, _ in named_tensors], [])\n+\n+ named_tensors = named_module_tensors(model, recurse=True)\n+ self.assertListEqual(\n+ [name for name, _ in named_tensors],\n+ [\n+ \"linear1.weight\",\n+ \"linear1.bias\",\n+ \"batchnorm.weight\",\n+ \"batchnorm.bias\",\n+ \"linear2.weight\",\n+ \"linear2.bias\",\n+ \"batchnorm.running_mean\",\n+ \"batchnorm.running_var\",\n+ \"batchnorm.num_batches_tracked\",\n+ ],\n+ )\n+\n+ named_tensors = named_module_tensors(model, include_buffers=False, recurse=True)\n+ self.assertListEqual(\n+ [name for name, _ in named_tensors],\n+ [\"linear1.weight\", \"linear1.bias\", \"batchnorm.weight\", \"batchnorm.bias\", \"linear2.weight\", \"linear2.bias\"],\n+ )\n+\n+ def test_find_tied_parameters(self):\n+ model = ModelForTest()\n+ self.assertDictEqual(find_tied_parameters(model), {})\n+ model.linear2.weight = model.linear1.weight\n+ self.assertDictEqual(find_tied_parameters(model), {\"linear1.weight\": \"linear2.weight\"})\n+\n+ def test_compute_module_sizes(self):\n+ model = ModelForTest()\n+ expected_sizes = {\"\": 236, \"linear1\": 64, \"linear1.weight\": 48, \"linear1.bias\": 16}\n+ expected_sizes.update({\"linear2\": 100, \"linear2.weight\": 80, \"linear2.bias\": 20})\n+ expected_sizes.update({\"batchnorm\": 72, \"batchnorm.weight\": 16, \"batchnorm.bias\": 16})\n+ expected_sizes.update(\n+ {\"batchnorm.running_mean\": 16, \"batchnorm.running_var\": 16, \"batchnorm.num_batches_tracked\": 8}\n+ )\n+\n+ module_sizes = compute_module_sizes(model)\n+ self.assertDictEqual(module_sizes, 
expected_sizes)\n+\n+ model.half()\n+ expected_sizes = {k: s // 2 for k, s in expected_sizes.items()}\n+ # This one is not converted to half.\n+ expected_sizes[\"batchnorm.num_batches_tracked\"] = 8\n+ # This impacts batchnorm and total\n+ expected_sizes[\"batchnorm\"] += 4\n+ expected_sizes[\"\"] += 4\n+\n+ module_sizes = compute_module_sizes(model)\n+ self.assertDictEqual(module_sizes, expected_sizes)\n+\n+ def test_check_device_map(self):\n+ model = ModelForTest()\n+ check_device_map(model, {\"\": 0})\n+ with self.assertRaises(ValueError):\n+ check_device_map(model, {\"linear1\": 0, \"linear2\": 1})\n+\n+ check_device_map(model, {\"linear1\": 0, \"linear2\": 1, \"batchnorm\": 1})\n+\n+ def shard_test_model(self, model, tmp_dir):\n+ module_index = {\n+ \"linear1\": \"checkpoint_part1.bin\",\n+ \"batchnorm\": \"checkpoint_part2.bin\",\n+ \"linear2\": \"checkpoint_part3.bin\",\n+ }\n+ index = {}\n+ for name, _ in model.state_dict().items():\n+ module = name.split(\".\")[0]\n+ index[name] = module_index[module]\n+\n+ with open(os.path.join(tmp_dir, \"weight_map.index.json\"), \"w\") as f:\n+ json.dump(index, f)\n+\n+ for module, fname in module_index.items():\n+ state_dict = {k: v for k, v in model.state_dict().items() if k.startswith(module)}\n+ full_fname = os.path.join(tmp_dir, fname)\n+ torch.save(state_dict, full_fname)\n+\n+ def test_load_checkpoint_in_model(self):\n+ # Check with whole checkpoint\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), fname)\n+ load_checkpoint_in_model(model, fname)\n+\n+ # Check with sharded index\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ self.shard_test_model(model, tmp_dir)\n+ index_file = os.path.join(tmp_dir, \"weight_map.index.json\")\n+ load_checkpoint_in_model(model, index_file)\n+\n+ # Check with sharded checkpoint\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ self.shard_test_model(model, tmp_dir)\n+ load_checkpoint_in_model(model, tmp_dir)\n+\n+ @require_cuda\n+ def test_load_checkpoint_in_model_one_gpu(self):\n+ device_map = {\"linear1\": 0, \"batchnorm\": \"cpu\", \"linear2\": \"cpu\"}\n+\n+ # Check with whole checkpoint\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), fname)\n+ load_checkpoint_in_model(model, fname, device_map=device_map)\n+ self.assertEqual(model.linear1.weight.device, torch.device(0))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # Check with sharded index\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ self.shard_test_model(model, tmp_dir)\n+ index_file = os.path.join(tmp_dir, \"weight_map.index.json\")\n+ load_checkpoint_in_model(model, index_file, device_map=device_map)\n+\n+ self.assertEqual(model.linear1.weight.device, torch.device(0))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ # Check with sharded checkpoint folder\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ self.shard_test_model(model, tmp_dir)\n+ load_checkpoint_in_model(model, tmp_dir, device_map=device_map)\n+\n+ self.assertEqual(model.linear1.weight.device, torch.device(0))\n+ 
self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(\"cpu\"))\n+\n+ @require_multi_gpu\n+ def test_load_checkpoint_in_model_two_gpu(self):\n+ device_map = {\"linear1\": 0, \"batchnorm\": \"cpu\", \"linear2\": 1}\n+\n+ # Check with whole checkpoint\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ fname = os.path.join(tmp_dir, \"pt_model.bin\")\n+ torch.save(model.state_dict(), fname)\n+ load_checkpoint_in_model(model, fname, device_map=device_map)\n+ self.assertEqual(model.linear1.weight.device, torch.device(0))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(1))\n+\n+ # Check with sharded index\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ self.shard_test_model(model, tmp_dir)\n+ index_file = os.path.join(tmp_dir, \"weight_map.index.json\")\n+ load_checkpoint_in_model(model, index_file, device_map=device_map)\n+\n+ self.assertEqual(model.linear1.weight.device, torch.device(0))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(1))\n+\n+ # Check with sharded checkpoint\n+ model = ModelForTest()\n+ with tempfile.TemporaryDirectory() as tmp_dir:\n+ self.shard_test_model(model, tmp_dir)\n+ load_checkpoint_in_model(model, tmp_dir, device_map=device_map)\n+\n+ self.assertEqual(model.linear1.weight.device, torch.device(0))\n+ self.assertEqual(model.batchnorm.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(model.linear2.weight.device, torch.device(1))\n+\n+ def test_clean_device_map(self):\n+ # Regroup everything if all is on the same device\n+ self.assertDictEqual(clean_device_map({\"a\": 0, \"b\": 0, \"c\": 0}), {\"\": 0})\n+ # Regroups children of level 1 on the same device\n+ self.assertDictEqual(\n+ clean_device_map({\"a.x\": 0, \"a.y\": 0, \"b.x\": 1, \"b.y\": 1, \"c\": 1}), {\"a\": 0, \"b\": 1, \"c\": 1}\n+ )\n+ # Regroups children of level 2 on the same device\n+ self.assertDictEqual(\n+ clean_device_map({\"a.x\": 0, \"a.y\": 0, \"b.x.0\": 1, \"b.x.1\": 1, \"b.y.0\": 2, \"b.y.1\": 2, \"c\": 2}),\n+ {\"a\": 0, \"b.x\": 1, \"b.y\": 2, \"c\": 2},\n+ )\n+\n+ def test_infer_auto_device_map(self):\n+ model = ModelForTest()\n+ # model has size 236: linear1 64, batchnorm 72, linear2 100\n+\n+ device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200})\n+ # only linear1 fits on device 0 as we keep memory available for the maximum layer in case of offload\n+ self.assertDictEqual(device_map, {\"linear1\": 0, \"batchnorm\": 1, \"linear2\": 1})\n+\n+ device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 172, 2: 200})\n+ # On device 1, we don't care about keeping size available for the max layer, so even if there is just the\n+ # size available for batchnorm + linear2, they fit here.\n+ self.assertDictEqual(device_map, {\"linear1\": 0, \"batchnorm\": 1, \"linear2\": 1})\n+\n+ model.linear1.weight = model.linear2.weight\n+ device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200})\n+ # By tying weights, the whole model fits on device 0\n+ self.assertDictEqual(device_map, {\"\": 0})\n+\n+ # When splitting a bigger model, the split is done at the layer level\n+ model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest())\n+ device_map = infer_auto_device_map(model, max_memory={0: 500, 1: 500})\n+ self.assertDictEqual(device_map, {\"0\": 0, 
\"1.linear1\": 0, \"1.batchnorm\": 0, \"1.linear2\": 1, \"2\": 1})\n+\n+ # With no_split_module_classes, it's done at that module level\n+ model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest())\n+ device_map = infer_auto_device_map(\n+ model, max_memory={0: 500, 1: 500}, no_split_module_classes=[\"ModelForTest\"]\n+ )\n+ self.assertDictEqual(device_map, {\"0\": 0, \"1\": 1, \"2\": 1})\n+\n+ # Now if we have weights tied inside submodules, tied weights are on the same device.\n+ model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest())\n+ layer0 = getattr(model, \"0\")\n+ layer2 = getattr(model, \"2\")\n+ layer0.linear2.weight = layer2.linear2.weight\n+ device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500})\n+ expected = {\"0\": 0, \"2.linear2\": 0, \"1\": 1, \"2.linear1\": 1, \"2.batchnorm\": 1}\n+ self.assertDictEqual(device_map, expected)\ndiff --git a/tests/test_offload.py b/tests/test_offload.py\nnew file mode 100644\nindex 000000000..515d5094d\n--- /dev/null\n+++ b/tests/test_offload.py\n@@ -0,0 +1,87 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import unittest\n+from tempfile import TemporaryDirectory\n+\n+import torch\n+import torch.nn as nn\n+\n+from accelerate.utils import OffloadedWeightsLoader, offload_state_dict\n+\n+\n+class ModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.batchnorm = nn.BatchNorm1d(4)\n+ self.linear2 = nn.Linear(4, 5)\n+\n+ def forward(self, x):\n+ return self.linear2(self.batchnorm(self.linear1(x)))\n+\n+\n+class OffloadTester(unittest.TestCase):\n+ def test_offload_state_dict(self):\n+ from tempfile import TemporaryDirectory\n+\n+ model = ModelForTest()\n+ with TemporaryDirectory() as tmp_dir:\n+ offload_state_dict(tmp_dir, model.state_dict())\n+ index_file = os.path.join(tmp_dir, \"index.json\")\n+ self.assertTrue(os.path.isfile(index_file))\n+ # TODO: add tests on what is inside the index\n+\n+ for key in [\"linear1.weight\", \"linear1.bias\", \"linear2.weight\", \"linear2.bias\"]:\n+ weight_file = os.path.join(tmp_dir, f\"{key}.dat\")\n+ self.assertTrue(os.path.isfile(weight_file))\n+ # TODO: add tests on the fact weights are properly loaded\n+\n+ def test_offload_weights_loader(self):\n+ model = ModelForTest()\n+ state_dict = model.state_dict()\n+ cpu_part = {k: v for k, v in state_dict.items() if \"linear2\" not in k}\n+ disk_part = {k: v for k, v in state_dict.items() if \"linear2\" in k}\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ offload_state_dict(tmp_dir, disk_part)\n+ weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)\n+\n+ # Every key is there with the right value\n+ self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))\n+ for key, param in state_dict.items():\n+ self.assertTrue(torch.allclose(param, weight_map[key]))\n+\n+ cpu_part = {k: v for k, v in state_dict.items() if \"weight\" in k}\n+ disk_part = 
{k: v for k, v in state_dict.items() if \"weight\" not in k}\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ offload_state_dict(tmp_dir, disk_part)\n+ weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)\n+\n+ # Every key is there with the right value\n+ self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))\n+ for key, param in state_dict.items():\n+ self.assertTrue(torch.allclose(param, weight_map[key]))\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ offload_state_dict(tmp_dir, state_dict)\n+ # Duplicates are removed\n+ weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)\n+\n+ # Every key is there with the right value\n+ self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))\n+ for key, param in state_dict.items():\n+ self.assertTrue(torch.allclose(param, weight_map[key]))\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nindex 0beba353a..f2414dfe5 100644\n--- a/tests/test_utils.py\n+++ b/tests/test_utils.py\n@@ -20,7 +20,7 @@\n import torch\n \n from accelerate.test_utils.training import RegressionModel\n-from accelerate.utils import convert_outputs_to_fp32, patch_environment, send_to_device\n+from accelerate.utils import convert_outputs_to_fp32, find_device, patch_environment, send_to_device\n \n \n TestNamedTuple = namedtuple(\"TestNamedTuple\", \"a b c\")\n@@ -78,3 +78,8 @@ def test_convert_to_32_lets_model_pickle(self):\n model = RegressionModel()\n model.forward = convert_outputs_to_fp32(model.forward)\n _ = pickle.dumps(model)\n+\n+ def test_find_device(self):\n+ self.assertEqual(find_device([1, \"a\", torch.tensor([1, 2, 3])]), torch.device(\"cpu\"))\n+ self.assertEqual(find_device({\"a\": 1, \"b\": torch.tensor([1, 2, 3])}), torch.device(\"cpu\"))\n+ self.assertIsNone(find_device([1, \"a\"]))\n", "code_comments": [ { "body": "Nice! Think that's a good use of a context manager mechanism - it's quite intuitive that inside the context init uses no RAM.\r\n\r\n", "diff_hunk": "@@ -0,0 +1,203 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import OffloadedWeightsLoader, extract_submodules_state_dict, offload_state_dict\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):", "from_author": false }, { "body": "```suggestion\r\n model's first parameter device.\r\n```", "diff_hunk": "@@ -0,0 +1,203 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import OffloadedWeightsLoader, extract_submodules_state_dict, offload_state_dict\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all bufeers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). 
Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.", "from_author": false }, { "body": "Could it make sense to have models automatically create such a map for themselves? Think this would have to be model-specific and to be in Transformers no?", "diff_hunk": "@@ -0,0 +1,204 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+from tempfile import TemporaryDirectory\n+\n+import torch\n+import torch.nn as nn\n+\n+from accelerate.big_modeling import cpu_offload, disk_offload, dispatch_model, init_empty_weights\n+from accelerate.hooks import remove_hook_from_submodules\n+from accelerate.test_utils import require_cuda, require_multi_gpu, slow\n+from accelerate.utils import offload_state_dict\n+from transformers import AutoModelForCausalLM, AutoTokenizer\n+\n+\n+class ModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.batchnorm = nn.BatchNorm1d(4)\n+ self.linear2 = nn.Linear(4, 5)\n+\n+ def forward(self, x):\n+ return self.linear2(self.batchnorm(self.linear1(x)))\n+\n+\n+class BiggerModelForTest(nn.Sequential):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.linear2 = nn.Linear(4, 5)\n+ self.batchnorm = nn.BatchNorm1d(5)\n+ self.linear3 = nn.Linear(5, 6)\n+ self.linear4 = nn.Linear(6, 5)\n+\n+ def forward(self, x):\n+ return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n+\n+\n+class BigModelingTester(unittest.TestCase):\n+ def test_init_empty_weights(self):\n+ # base use\n+ with init_empty_weights():\n+ module = nn.Linear(4, 5)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+\n+ # base use with buffers, they are not touched\n+ with init_empty_weights():\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ # Use with include_buffers=True\n+ with init_empty_weights(include_buffers=True):\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"meta\"))\n+\n+ # Double check we didn't break PyTorch\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ def test_init_empty_weights_very_large_model(self):\n+ # This is a 100 billion parameters model.\n+ with init_empty_weights():\n+ _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+\n+ def test_cpu_offload(self):\n+ model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ cpu_offload(model, execution_device=device)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ cpu_offload(model, execution_device=device, offload_buffers=True)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ @slow\n+ @require_cuda\n+ def test_cpu_offload_gpt2(self):\n+ tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! 
My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ cpu_offload(gpt2, execution_device=0)\n+ _ = gpt2.generate(inputs[\"input_ids\"])\n+\n+ def test_disk_offload(self):\n+ model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(model, tmp_dir, execution_device=device)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ @slow\n+ @require_cuda\n+ def test_disk_offload_gpt2(self):\n+ tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(gpt2, tmp_dir, execution_device=0)\n+ _ = gpt2.generate(inputs[\"input_ids\"])\n+\n+ @require_cuda\n+ def test_dispatch_model(self):\n+ model = ModelForTest()\n+ device_map = {\"linear1\": \"disk\", \"batchnorm\": \"cpu\", \"linear2\": 0}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ dispatch_model(model, device_map, offload_dir=tmp_dir)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n+ @require_multi_gpu\n+ def test_dispatch_model_multi_gpu(self):\n+ model = BiggerModelForTest()\n+ device_map = {\"linear1\": \"cpu\", \"linear2\": \"disk\", \"batchnorm\": \"cpu\", \"linear3\": 0, \"linear4\": 1}", "from_author": false }, { "body": "Maybe worth adding a quick test here in the beginning that the parameter names of the model all have to be present in the `device_map()` so that people would get a quick / good error message?", "diff_hunk": "@@ -0,0 +1,203 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import OffloadedWeightsLoader, extract_submodules_state_dict, offload_state_dict\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. 
Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all bufeers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. 
During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names to the device they should go to. Note that `\"disk\"` is accepted even if\n+ it's not a proper value for `torch.device`.\n+ main_device (`str`, `int` or `torch.device`, *optional*):\n+ The main execution device. Will default to the first device in the `device_map` different from `\"cpu\"` or\n+ `\"disk\"`.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the part of the model that will be kept on CPU.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"", "from_author": false }, { "body": "Think a type hint for `max_memory` that it should be a dict could be useful here", "diff_hunk": "@@ -0,0 +1,454 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, named_parameters=None, prefix=\"\", result=None):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature has more arguments, but they are for the recursive part of this function and you should ignore them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ if result is None:\n+ result = {}\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory=None, buffer=\"1500MB\"):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: mem - convert_file_size_to_int(buffer) for i, mem in enumerate(torch.cuda.mem_get_info())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def infer_auto_device_map(model, max_memory=None, no_split_module_classes=None):", "from_author": false }, { "body": "This is what the `infer_auto_device_map` function does.", "diff_hunk": "@@ -0,0 +1,204 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+from tempfile import TemporaryDirectory\n+\n+import torch\n+import torch.nn as nn\n+\n+from accelerate.big_modeling import cpu_offload, disk_offload, dispatch_model, init_empty_weights\n+from accelerate.hooks import remove_hook_from_submodules\n+from accelerate.test_utils import require_cuda, require_multi_gpu, slow\n+from accelerate.utils import offload_state_dict\n+from transformers import AutoModelForCausalLM, AutoTokenizer\n+\n+\n+class ModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.batchnorm = nn.BatchNorm1d(4)\n+ self.linear2 = nn.Linear(4, 5)\n+\n+ def forward(self, x):\n+ return self.linear2(self.batchnorm(self.linear1(x)))\n+\n+\n+class BiggerModelForTest(nn.Sequential):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.linear2 = nn.Linear(4, 5)\n+ self.batchnorm = nn.BatchNorm1d(5)\n+ self.linear3 = nn.Linear(5, 6)\n+ self.linear4 = nn.Linear(6, 5)\n+\n+ def forward(self, x):\n+ return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n+\n+\n+class BigModelingTester(unittest.TestCase):\n+ def test_init_empty_weights(self):\n+ # base use\n+ with init_empty_weights():\n+ module = nn.Linear(4, 5)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+\n+ # base use with buffers, they are not touched\n+ with init_empty_weights():\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ # Use with include_buffers=True\n+ with init_empty_weights(include_buffers=True):\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"meta\"))\n+\n+ # Double check we didn't break PyTorch\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ def test_init_empty_weights_very_large_model(self):\n+ # This is a 100 billion parameters model.\n+ with init_empty_weights():\n+ _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+\n+ def test_cpu_offload(self):\n+ model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ cpu_offload(model, execution_device=device)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ cpu_offload(model, execution_device=device, offload_buffers=True)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ @slow\n+ @require_cuda\n+ def test_cpu_offload_gpt2(self):\n+ tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! 
My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ cpu_offload(gpt2, execution_device=0)\n+ _ = gpt2.generate(inputs[\"input_ids\"])\n+\n+ def test_disk_offload(self):\n+ model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(model, tmp_dir, execution_device=device)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ @slow\n+ @require_cuda\n+ def test_disk_offload_gpt2(self):\n+ tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n+ with TemporaryDirectory() as tmp_dir:\n+ disk_offload(gpt2, tmp_dir, execution_device=0)\n+ _ = gpt2.generate(inputs[\"input_ids\"])\n+\n+ @require_cuda\n+ def test_dispatch_model(self):\n+ model = ModelForTest()\n+ device_map = {\"linear1\": \"disk\", \"batchnorm\": \"cpu\", \"linear2\": 0}\n+\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ with TemporaryDirectory() as tmp_dir:\n+ dispatch_model(model, device_map, offload_dir=tmp_dir)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n+\n+ @require_multi_gpu\n+ def test_dispatch_model_multi_gpu(self):\n+ model = BiggerModelForTest()\n+ device_map = {\"linear1\": \"cpu\", \"linear2\": \"disk\", \"batchnorm\": \"cpu\", \"linear3\": 0, \"linear4\": 1}", "from_author": true }, { "body": "Yes, definitely needed!", "diff_hunk": "@@ -0,0 +1,203 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import OffloadedWeightsLoader, extract_submodules_state_dict, offload_state_dict\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. 
Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all bufeers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. 
During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names to the device they should go to. Note that `\"disk\"` is accepted even if\n+ it's not a proper value for `torch.device`.\n+ main_device (`str`, `int` or `torch.device`, *optional*):\n+ The main execution device. Will default to the first device in the `device_map` different from `\"cpu\"` or\n+ `\"disk\"`.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the part of the model that will be kept on CPU.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"", "from_author": true }, { "body": "```suggestion\r\n Whether or not to also put all buffers on the meta device while initializing.\r\n```", "diff_hunk": "@@ -0,0 +1,206 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import OffloadedWeightsLoader, check_device_map, extract_submodules_state_dict, offload_state_dict\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all bufeers on the meta device while initializing.", "from_author": false }, { "body": "```suggestion\r\n model (`torch.nn.Module`): \r\n The model to offload.\r\n```", "diff_hunk": "@@ -0,0 +1,206 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import OffloadedWeightsLoader, check_device_map, extract_submodules_state_dict, offload_state_dict\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. 
Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all bufeers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.", "from_author": false }, { "body": "```suggestion\r\n model (`torch.nn.Module`): \r\n The model to dispatch.\r\n```", "diff_hunk": "@@ -0,0 +1,206 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import OffloadedWeightsLoader, check_device_map, extract_submodules_state_dict, offload_state_dict\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. 
Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all bufeers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. 
During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to dispatch.", "from_author": false }, { "body": "I wonder if there's a clever way to check when someone tries this with a model named `nn.Sequential`, and give them hints as to where those are named. Because at first as a newbie I'd think \"maybe that's not supported\"? When in reality I'd need to check `net.state_dict()`.\r\n\r\nOne potential solution would be changing the phrasing some:\r\n\r\n```suggestion\r\n device_map (`Dict[str, Union[str, int, torch.device]]`):\r\n A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that `\"disk\"` is accepted even if\r\n it's not a proper value for `torch.device`.\r\n```\r\n", "diff_hunk": "@@ -0,0 +1,206 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import OffloadedWeightsLoader, check_device_map, extract_submodules_state_dict, offload_state_dict\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all bufeers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). 
Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names to the device they should go to. Note that `\"disk\"` is accepted even if\n+ it's not a proper value for `torch.device`.", "from_author": false }, { "body": "While there is a warning in the docstring, I'm wondering if we should perhaps add an extra warning here too? Or do we think that'd be too much feedback for the user", "diff_hunk": "@@ -0,0 +1,411 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import functools\n+from typing import Dict, Mapping, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .utils import PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device\n+\n+\n+class ModelHook:\n+ \"\"\"\n+ A hook that contains callbacks to be executed just before and after the forward method of a model. The difference\n+ with PyTorch existing hooks is that they get passed along the kwargs.\n+\n+ Class attribute:\n+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under\n+ the `torch.no_grad()` context manager.\n+ \"\"\"\n+\n+ no_grad = False\n+\n+ def init_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is attached to the module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module attached to this hook.\n+ \"\"\"\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ \"\"\"\n+ To be executed just before the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.\n+ args (`Tuple[Any]`): The positional arguments passed to the module.\n+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.\n+\n+ Returns:\n+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.\n+ \"\"\"\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ \"\"\"\n+ To be executed just after the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass been executed just before this event.\n+ output (`Any`): The output of the module.\n+\n+ Returns:\n+ `Any`: The processed `output`.\n+ \"\"\"\n+ return output\n+\n+ def detach_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is deached from a module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module detached from this hook.\n+ \"\"\"\n+ return module\n+\n+\n+class SequentialHook(ModelHook):\n+ \"\"\"\n+ A hook that can contain several hooks and iterates through them at each event.\n+ \"\"\"\n+\n+ def __init__(self, *hooks):\n+ self.hooks = hooks\n+\n+ def init_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.init_hook(module)\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ for hook in self.hooks:\n+ args, kwargs = hook.pre_forward(module, *args, **kwargs)\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ for hook in self.hooks:\n+ output = hook.post_forward(module, output)\n+ return output\n+\n+ def detach_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.detach_hook(module)\n+ return module\n+\n+\n+def add_hook_to_module(module: nn.Module, hook: ModelHook):\n+ \"\"\"\n+ Adds a hook to a given module. 
This will rewrite the `forward` method of the module to include the hook, to remove\n+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.\n+\n+ <Tip warning={true}>\n+\n+ If the module already contains a hook, this will replace it with the new hook passed. To chain two hooks together,\n+ use the `SequentialHook` class.\n+\n+ </Tip>\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to attach a hook to.\n+ hook (`ModelHook`): The hook to attach.\n+\n+ Returns:\n+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n+ be discarded).\n+ \"\"\"\n+ if hasattr(module, \"_hf_hook\") and hasattr(module, \"_old_forward\"):\n+ # If we already put some hook on this module, we replace it with the new one.\n+ old_forward = module._old_forward", "from_author": false }, { "body": "We should just keep those as kwargs then I think, since the user should never need to worry about them", "diff_hunk": "@@ -0,0 +1,489 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, named_parameters=None, prefix=\"\", result=None):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature has more arguments, but they are for the recursive part of this function and you should ignore them.\n+\n+ </Tip>", "from_author": false }, { "body": "Just to make sure I understand correctly: we just check that it *runs*, we don't worry about whether the outputs make sense?\r\n\r\nShould we also check that the params didn't get messed with at all, etc.?", "diff_hunk": "@@ -0,0 +1,204 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+from tempfile import TemporaryDirectory\n+\n+import torch\n+import torch.nn as nn\n+\n+from accelerate.big_modeling import cpu_offload, disk_offload, dispatch_model, init_empty_weights\n+from accelerate.hooks import remove_hook_from_submodules\n+from accelerate.test_utils import require_cuda, require_multi_gpu, slow\n+from accelerate.utils import offload_state_dict\n+from transformers import AutoModelForCausalLM, AutoTokenizer\n+\n+\n+class ModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.batchnorm = nn.BatchNorm1d(4)\n+ self.linear2 = nn.Linear(4, 5)\n+\n+ def forward(self, x):\n+ return self.linear2(self.batchnorm(self.linear1(x)))\n+\n+\n+class BiggerModelForTest(nn.Sequential):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.linear2 = nn.Linear(4, 5)\n+ self.batchnorm = nn.BatchNorm1d(5)\n+ self.linear3 = nn.Linear(5, 6)\n+ self.linear4 = nn.Linear(6, 5)\n+\n+ def forward(self, x):\n+ return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n+\n+\n+class BigModelingTester(unittest.TestCase):\n+ def test_init_empty_weights(self):\n+ # base use\n+ with init_empty_weights():\n+ module = nn.Linear(4, 5)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+\n+ # base use with buffers, they are not touched\n+ with init_empty_weights():\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, 
torch.device(\"cpu\"))\n+\n+ # Use with include_buffers=True\n+ with init_empty_weights(include_buffers=True):\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"meta\"))\n+\n+ # Double check we didn't break PyTorch\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ def test_init_empty_weights_very_large_model(self):\n+ # This is a 100 billion parameters model.\n+ with init_empty_weights():\n+ _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+\n+ def test_cpu_offload(self):\n+ model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ cpu_offload(model, execution_device=device)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ cpu_offload(model, execution_device=device, offload_buffers=True)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ @slow\n+ @require_cuda\n+ def test_cpu_offload_gpt2(self):\n+ tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")", "from_author": false }, { "body": "Too much feedback I believe. This is also very internal, I don't think the average user will use those hooks.", "diff_hunk": "@@ -0,0 +1,411 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import functools\n+from typing import Dict, Mapping, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .utils import PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device\n+\n+\n+class ModelHook:\n+ \"\"\"\n+ A hook that contains callbacks to be executed just before and after the forward method of a model. 
The difference\n+ with PyTorch existing hooks is that they get passed along the kwargs.\n+\n+ Class attribute:\n+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under\n+ the `torch.no_grad()` context manager.\n+ \"\"\"\n+\n+ no_grad = False\n+\n+ def init_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is attached to the module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module attached to this hook.\n+ \"\"\"\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ \"\"\"\n+ To be executed just before the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.\n+ args (`Tuple[Any]`): The positional arguments passed to the module.\n+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.\n+\n+ Returns:\n+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.\n+ \"\"\"\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ \"\"\"\n+ To be executed just after the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass been executed just before this event.\n+ output (`Any`): The output of the module.\n+\n+ Returns:\n+ `Any`: The processed `output`.\n+ \"\"\"\n+ return output\n+\n+ def detach_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is deached from a module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module detached from this hook.\n+ \"\"\"\n+ return module\n+\n+\n+class SequentialHook(ModelHook):\n+ \"\"\"\n+ A hook that can contain several hooks and iterates through them at each event.\n+ \"\"\"\n+\n+ def __init__(self, *hooks):\n+ self.hooks = hooks\n+\n+ def init_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.init_hook(module)\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ for hook in self.hooks:\n+ args, kwargs = hook.pre_forward(module, *args, **kwargs)\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ for hook in self.hooks:\n+ output = hook.post_forward(module, output)\n+ return output\n+\n+ def detach_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.detach_hook(module)\n+ return module\n+\n+\n+def add_hook_to_module(module: nn.Module, hook: ModelHook):\n+ \"\"\"\n+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove\n+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.\n+\n+ <Tip warning={true}>\n+\n+ If the module already contains a hook, this will replace it with the new hook passed. To chain two hooks together,\n+ use the `SequentialHook` class.\n+\n+ </Tip>\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to attach a hook to.\n+ hook (`ModelHook`): The hook to attach.\n+\n+ Returns:\n+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n+ be discarded).\n+ \"\"\"\n+ if hasattr(module, \"_hf_hook\") and hasattr(module, \"_old_forward\"):\n+ # If we already put some hook on this module, we replace it with the new one.\n+ old_forward = module._old_forward", "from_author": true }, { "body": "That would be nice to add in that test indeed!", "diff_hunk": "@@ -0,0 +1,204 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+from tempfile import TemporaryDirectory\n+\n+import torch\n+import torch.nn as nn\n+\n+from accelerate.big_modeling import cpu_offload, disk_offload, dispatch_model, init_empty_weights\n+from accelerate.hooks import remove_hook_from_submodules\n+from accelerate.test_utils import require_cuda, require_multi_gpu, slow\n+from accelerate.utils import offload_state_dict\n+from transformers import AutoModelForCausalLM, AutoTokenizer\n+\n+\n+class ModelForTest(nn.Module):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.batchnorm = nn.BatchNorm1d(4)\n+ self.linear2 = nn.Linear(4, 5)\n+\n+ def forward(self, x):\n+ return self.linear2(self.batchnorm(self.linear1(x)))\n+\n+\n+class BiggerModelForTest(nn.Sequential):\n+ def __init__(self):\n+ super().__init__()\n+ self.linear1 = nn.Linear(3, 4)\n+ self.linear2 = nn.Linear(4, 5)\n+ self.batchnorm = nn.BatchNorm1d(5)\n+ self.linear3 = nn.Linear(5, 6)\n+ self.linear4 = nn.Linear(6, 5)\n+\n+ def forward(self, x):\n+ return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n+\n+\n+class BigModelingTester(unittest.TestCase):\n+ def test_init_empty_weights(self):\n+ # base use\n+ with init_empty_weights():\n+ module = nn.Linear(4, 5)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+\n+ # base use with buffers, they are not touched\n+ with init_empty_weights():\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ # Use with include_buffers=True\n+ with init_empty_weights(include_buffers=True):\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"meta\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"meta\"))\n+\n+ # Double check we didn't break PyTorch\n+ module = nn.BatchNorm1d(4)\n+ self.assertEqual(module.weight.device, torch.device(\"cpu\"))\n+ self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n+\n+ def test_init_empty_weights_very_large_model(self):\n+ # This is a 100 billion parameters model.\n+ with init_empty_weights():\n+ _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+\n+ def test_cpu_offload(self):\n+ model = ModelForTest()\n+ x = torch.randn(2, 3)\n+ expected = model(x)\n+\n+ device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n+\n+ cpu_offload(model, execution_device=device)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ # Clean up for next test.\n+ remove_hook_from_submodules(model)\n+\n+ cpu_offload(model, execution_device=device, offload_buffers=True)\n+ output = model(x)\n+ self.assertTrue(torch.allclose(expected, output.cpu()))\n+\n+ @slow\n+ @require_cuda\n+ def test_cpu_offload_gpt2(self):\n+ tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n+ inputs = tokenizer(\"Hello world! 
My name is\", return_tensors=\"pt\").to(0)\n+\n+ gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")", "from_author": true }, { "body": "Is there a situation where we wouldn't want to include buffers?", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.", "from_author": false }, { "body": "```suggestion\r\n # Will contain the input device when `io_same_device=True`.\r\n```", "diff_hunk": "@@ -0,0 +1,411 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import functools\n+from typing import Dict, Mapping, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .utils import PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device\n+\n+\n+class ModelHook:\n+ \"\"\"\n+ A hook that contains callbacks to be executed just before and after the forward method of a model. 
The difference\n+ with PyTorch existing hooks is that they get passed along the kwargs.\n+\n+ Class attribute:\n+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under\n+ the `torch.no_grad()` context manager.\n+ \"\"\"\n+\n+ no_grad = False\n+\n+ def init_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is attached to the module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module attached to this hook.\n+ \"\"\"\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ \"\"\"\n+ To be executed just before the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.\n+ args (`Tuple[Any]`): The positional arguments passed to the module.\n+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.\n+\n+ Returns:\n+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.\n+ \"\"\"\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ \"\"\"\n+ To be executed just after the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass been executed just before this event.\n+ output (`Any`): The output of the module.\n+\n+ Returns:\n+ `Any`: The processed `output`.\n+ \"\"\"\n+ return output\n+\n+ def detach_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is deached from a module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module detached from this hook.\n+ \"\"\"\n+ return module\n+\n+\n+class SequentialHook(ModelHook):\n+ \"\"\"\n+ A hook that can contain several hooks and iterates through them at each event.\n+ \"\"\"\n+\n+ def __init__(self, *hooks):\n+ self.hooks = hooks\n+\n+ def init_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.init_hook(module)\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ for hook in self.hooks:\n+ args, kwargs = hook.pre_forward(module, *args, **kwargs)\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ for hook in self.hooks:\n+ output = hook.post_forward(module, output)\n+ return output\n+\n+ def detach_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.detach_hook(module)\n+ return module\n+\n+\n+def add_hook_to_module(module: nn.Module, hook: ModelHook):\n+ \"\"\"\n+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove\n+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.\n+\n+ <Tip warning={true}>\n+\n+ If the module already contains a hook, this will replace it with the new hook passed. 
To chain two hooks together,\n+ use the `SequentialHook` class.\n+\n+ </Tip>\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to attach a hook to.\n+ hook (`ModelHook`): The hook to attach.\n+\n+ Returns:\n+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n+ be discarded).\n+ \"\"\"\n+ if hasattr(module, \"_hf_hook\") and hasattr(module, \"_old_forward\"):\n+ # If we already put some hook on this module, we replace it with the new one.\n+ old_forward = module._old_forward\n+ else:\n+ old_forward = module.forward\n+ module._old_forward = old_forward\n+\n+ module = hook.init_hook(module)\n+ module._hf_hook = hook\n+\n+ @functools.wraps(old_forward)\n+ def new_forward(*args, **kwargs):\n+ args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)\n+ if module._hf_hook.no_grad:\n+ with torch.no_grad():\n+ output = old_forward(*args, **kwargs)\n+ else:\n+ output = old_forward(*args, **kwargs)\n+ return module._hf_hook.post_forward(module, output)\n+\n+ module.forward = new_forward\n+ return module\n+\n+\n+def remove_hook_from_module(module: nn.Module):\n+ \"\"\"\n+ Removes any hook attached to a module via `add_hook_to_module`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to attach a hook to.\n+\n+ Returns:\n+ `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can\n+ be discarded).\n+ \"\"\"\n+ if hasattr(module, \"_hf_hook\"):\n+ module._hf_hook.detach_hook(module)\n+ delattr(module, \"_hf_hook\")\n+\n+ if hasattr(module, \"_old_forward\"):\n+ module.forward = module._old_forward\n+ delattr(module, \"_old_forward\")\n+\n+ return module\n+\n+\n+class AlignDevicesHook(ModelHook):\n+ \"\"\"\n+ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the\n+ associated module, potentially offloading the weights after the forward pass.\n+\n+ Args:\n+ execution_device (`torch.device`, *optional*):\n+ The device on which inputs and model weights should be placed before the forward pass.\n+ offload (`bool`, *optional*, defauts to `False`):\n+ Whether or not the weights should be offloaded after the forward pass.\n+ io_same_device (`bool`, *optional*, defaults to `False`):\n+ Whether or not the output should be placed on the same device as the input was.\n+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to include the associated module's buffers when offloading.\n+ place_submodules (`bool`, *optional*, defaults to `False`):\n+ Whether to place the submodules on `execution_device` during the `init_hook` event.\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ execution_device: Optional[Union[int, str, torch.device]] = None,\n+ offload: bool = False,\n+ io_same_device: bool = False,\n+ weights_map: Optional[Mapping] = None,\n+ offload_buffers: bool = False,\n+ place_submodules: bool = False,\n+ ):\n+ self.execution_device = execution_device\n+ self.offload = offload\n+ self.io_same_device = io_same_device\n+ self.weights_map = weights_map\n+ self.offload_buffers = offload_buffers\n+ self.place_submodules = place_submodules\n+\n+ # Will contain the input device when `output_on_same_device=True`.", "from_author": false }, { "body": "```suggestion\r\n raise ValueError(\"Need either a `state_dict` or a `save_folder` containing offloaded 
weights.\")\r\n```", "diff_hunk": "@@ -0,0 +1,143 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import json\n+import os\n+from collections.abc import Mapping\n+from typing import Dict, List, Optional, Union\n+\n+import numpy as np\n+import torch\n+\n+\n+def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):\n+ \"\"\"\n+ Offload a state dict in a given folder.\n+\n+ Args:\n+ save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict.\n+ state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload.\n+ \"\"\"\n+ os.makedirs(save_dir, exist_ok=True)\n+ index = {}\n+ for name, parameter in state_dict.items():\n+ tensor_file = os.path.join(save_dir, f\"{name}.dat\")\n+ array = parameter.numpy()\n+ index[name] = {\"dtype\": str(array.dtype), \"shape\": list(array.shape)}\n+ if array.ndim == 0:\n+ array = array[None]\n+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode=\"w+\", shape=array.shape)\n+ file_array[:] = array[:]\n+ file_array.flush()\n+\n+ # Update index\n+ index_file = os.path.join(save_dir, \"index.json\")\n+ if os.path.isfile(index_file):\n+ with open(index_file, \"r\", encoding=\"utf-8\") as f:\n+ current_index = json.load(f)\n+ else:\n+ current_index = {}\n+ current_index.update(index)\n+\n+ with open(index_file, \"w\", encoding=\"utf-8\") as f:\n+ json.dump(current_index, f, indent=2)\n+\n+\n+class PrefixedDataset(Mapping):\n+ \"\"\"\n+ Will access keys in a given dataset by adding a prefix.\n+\n+ Args:\n+ dataset (`Mapping`): Any map with string keys.\n+ prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.\n+ \"\"\"\n+\n+ def __init__(self, dataset: Mapping, prefix: str):\n+ self.dataset = dataset\n+ self.prefix = prefix\n+\n+ def __getitem__(self, key):\n+ return self.dataset[f\"{self.prefix}{key}\"]\n+\n+ def __iter__(self):\n+ return iter([key for key in self.dataset if key.startswith(self.prefix)])\n+\n+ def __len__(self):\n+ return len(self.dataset)\n+\n+\n+class OffloadedWeightsLoader(Mapping):\n+ \"\"\"\n+ A collection that loads weights stored in a given state dict or memory-mapped on disk.\n+\n+ Args:\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ A dictionary parameter name to tensor.\n+ save_folder (`str` or `os.PathLike`, *optional*):\n+ The directory in which the weights are stored (by `offload_state_dict` for instance).\n+ index (`Dict`, *optional*):\n+ A dictionary from weight name to their information (`dtype` and `shape`). 
Will default to the index saved\n+ in `save_folder`.\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ state_dict: Dict[str, torch.Tensor] = None,\n+ save_folder: Optional[Union[str, os.PathLike]] = None,\n+ index: Mapping = None,\n+ ):\n+ if state_dict is None and save_folder is None:\n+ raise ValueError(\"Need either a `state_dict` or a `save_folder` containint offloaded weights.\")", "from_author": false }, { "body": "when this condition happens, reassigning `max_layer_size` impacts line 377 wherein second largest `max_layer_size` gets subtracted instead of largest, is this understanding correct? If yes, this isn't expected behaviour I guess.\r\n\r\nSample test case model:\r\n```python\r\nclass MyCustomModel(nn.Module):\r\n def __init__():\r\n super(MyCustomModel, self).__init__()\r\n self.huge_layer = nn.Linear(100000, 100000) # 10 Billion params\r\n self.upscale_layer = nn.Linear(768, 100000) \r\n self.output_layer = nn.Linear(100000, 5) \r\n self.bert_model = AutoModel.from_pretrained(\"bert-base-uncased\") # 110 Million Params\r\n\r\n def forward(input_ids, attention_mask):\r\n _, pooler_output = self.bert_model(input_ids, attention_mask, return_dict=True)\r\n output = torch.nn.ReLU(self.upscale_layer(pooler_output))\r\n output = torch.nn.ReLU(self.huge_layer(output))\r\n output_logits = self.output_layer(output)\r\n return output_logits\r\n```", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]\n+\n+ module_sizes = compute_module_sizes(model)\n+ tied_parameters = find_tied_parameters(model)\n+\n+ device_map = {}\n+ current_device = 0\n+ current_memory_used = 0\n+\n+ # Direct submodules and parameters\n+ modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ # Initialize maximum largest layer, to know which space to keep in memory\n+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n+\n+ # Ready ? This is going to be a bit messy.\n+ while len(modules_to_treat) > 0:\n+ name, module = modules_to_treat.pop(0)\n+ # Max size in the remaining layers may have changed since we took one, so we maybe update it.\n+ max_layer_names = [n for n in max_layer_names if not n.startswith(name)]\n+ if len(max_layer_names) == 0:\n+ max_layer_size, max_layer_names = get_max_layer_size(", "from_author": false }, { "body": "```suggestion\r\n # If one device and one offload, we've got one hook.\r\n```", "diff_hunk": "@@ -0,0 +1,411 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import functools\n+from typing import Dict, Mapping, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .utils import PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device\n+\n+\n+class ModelHook:\n+ \"\"\"\n+ A hook that contains callbacks to be executed just before and after the forward method of a model. 
The difference\n+ with PyTorch existing hooks is that they get passed along the kwargs.\n+\n+ Class attribute:\n+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under\n+ the `torch.no_grad()` context manager.\n+ \"\"\"\n+\n+ no_grad = False\n+\n+ def init_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is attached to the module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module attached to this hook.\n+ \"\"\"\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ \"\"\"\n+ To be executed just before the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.\n+ args (`Tuple[Any]`): The positional arguments passed to the module.\n+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.\n+\n+ Returns:\n+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.\n+ \"\"\"\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ \"\"\"\n+ To be executed just after the forward method of the model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module whose forward pass been executed just before this event.\n+ output (`Any`): The output of the module.\n+\n+ Returns:\n+ `Any`: The processed `output`.\n+ \"\"\"\n+ return output\n+\n+ def detach_hook(self, module):\n+ \"\"\"\n+ To be executed when the hook is deached from a module.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module detached from this hook.\n+ \"\"\"\n+ return module\n+\n+\n+class SequentialHook(ModelHook):\n+ \"\"\"\n+ A hook that can contain several hooks and iterates through them at each event.\n+ \"\"\"\n+\n+ def __init__(self, *hooks):\n+ self.hooks = hooks\n+\n+ def init_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.init_hook(module)\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ for hook in self.hooks:\n+ args, kwargs = hook.pre_forward(module, *args, **kwargs)\n+ return args, kwargs\n+\n+ def post_forward(self, module, output):\n+ for hook in self.hooks:\n+ output = hook.post_forward(module, output)\n+ return output\n+\n+ def detach_hook(self, module):\n+ for hook in self.hooks:\n+ module = hook.detach_hook(module)\n+ return module\n+\n+\n+def add_hook_to_module(module: nn.Module, hook: ModelHook):\n+ \"\"\"\n+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove\n+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.\n+\n+ <Tip warning={true}>\n+\n+ If the module already contains a hook, this will replace it with the new hook passed. 
To chain two hooks together,\n+ use the `SequentialHook` class.\n+\n+ </Tip>\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to attach a hook to.\n+ hook (`ModelHook`): The hook to attach.\n+\n+ Returns:\n+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can\n+ be discarded).\n+ \"\"\"\n+ if hasattr(module, \"_hf_hook\") and hasattr(module, \"_old_forward\"):\n+ # If we already put some hook on this module, we replace it with the new one.\n+ old_forward = module._old_forward\n+ else:\n+ old_forward = module.forward\n+ module._old_forward = old_forward\n+\n+ module = hook.init_hook(module)\n+ module._hf_hook = hook\n+\n+ @functools.wraps(old_forward)\n+ def new_forward(*args, **kwargs):\n+ args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)\n+ if module._hf_hook.no_grad:\n+ with torch.no_grad():\n+ output = old_forward(*args, **kwargs)\n+ else:\n+ output = old_forward(*args, **kwargs)\n+ return module._hf_hook.post_forward(module, output)\n+\n+ module.forward = new_forward\n+ return module\n+\n+\n+def remove_hook_from_module(module: nn.Module):\n+ \"\"\"\n+ Removes any hook attached to a module via `add_hook_to_module`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module to attach a hook to.\n+\n+ Returns:\n+ `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can\n+ be discarded).\n+ \"\"\"\n+ if hasattr(module, \"_hf_hook\"):\n+ module._hf_hook.detach_hook(module)\n+ delattr(module, \"_hf_hook\")\n+\n+ if hasattr(module, \"_old_forward\"):\n+ module.forward = module._old_forward\n+ delattr(module, \"_old_forward\")\n+\n+ return module\n+\n+\n+class AlignDevicesHook(ModelHook):\n+ \"\"\"\n+ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the\n+ associated module, potentially offloading the weights after the forward pass.\n+\n+ Args:\n+ execution_device (`torch.device`, *optional*):\n+ The device on which inputs and model weights should be placed before the forward pass.\n+ offload (`bool`, *optional*, defauts to `False`):\n+ Whether or not the weights should be offloaded after the forward pass.\n+ io_same_device (`bool`, *optional*, defaults to `False`):\n+ Whether or not the output should be placed on the same device as the input was.\n+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to include the associated module's buffers when offloading.\n+ place_submodules (`bool`, *optional*, defaults to `False`):\n+ Whether to place the submodules on `execution_device` during the `init_hook` event.\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ execution_device: Optional[Union[int, str, torch.device]] = None,\n+ offload: bool = False,\n+ io_same_device: bool = False,\n+ weights_map: Optional[Mapping] = None,\n+ offload_buffers: bool = False,\n+ place_submodules: bool = False,\n+ ):\n+ self.execution_device = execution_device\n+ self.offload = offload\n+ self.io_same_device = io_same_device\n+ self.weights_map = weights_map\n+ self.offload_buffers = offload_buffers\n+ self.place_submodules = place_submodules\n+\n+ # Will contain the input device when `output_on_same_device=True`.\n+ self.input_device = None\n+ self.param_original_devices = {}\n+ self.buffer_original_devices = {}\n+\n+ def init_hook(self, module):\n+ if 
not self.offload and self.execution_device is not None:\n+ for name, _ in named_module_tensors(module, recurse=self.place_submodules):\n+ set_module_tensor_to_device(module, name, self.execution_device)\n+ elif self.offload:\n+ self.original_devices = {name: param.device for name, param in named_module_tensors(module)}\n+ if self.weights_map is None:\n+ self.weights_map = {\n+ name: param.to(\"cpu\")\n+ for name, param in named_module_tensors(module, include_buffers=self.offload_buffers)\n+ }\n+\n+ for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ set_module_tensor_to_device(module, name, \"meta\")\n+ if not self.offload_buffers and self.execution_device is not None:\n+ for name, _ in module.named_buffers(recurse=False):\n+ set_module_tensor_to_device(module, name, self.execution_device)\n+ return module\n+\n+ def pre_forward(self, module, *args, **kwargs):\n+ if self.io_same_device:\n+ self.input_device = find_device([args, kwargs])\n+ if self.offload:\n+ for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ set_module_tensor_to_device(module, name, self.execution_device, value=self.weights_map[name])\n+\n+ return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)\n+\n+ def post_forward(self, module, output):\n+ if self.offload:\n+ for name, _ in named_module_tensors(module, include_buffers=self.offload_buffers):\n+ set_module_tensor_to_device(module, name, \"meta\")\n+\n+ if self.io_same_device and self.input_device is not None:\n+ output = send_to_device(output, self.input_device)\n+\n+ return output\n+\n+ def detach_hook(self, module):\n+ if self.offload:\n+ for name, device in self.original_devices.items():\n+ if device != torch.device(\"meta\"):\n+ set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))\n+\n+\n+def attach_align_device_hook(\n+ module: torch.nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload: bool = False,\n+ weights_map: Optional[Mapping] = None,\n+ offload_buffers: bool = False,\n+ module_name: str = \"\",\n+):\n+ \"\"\"\n+ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or\n+ buffers.\n+\n+ Args:\n+ module (`torch.nn.Module`):\n+ The module where we want to attach the hooks.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which inputs and model weights should be placed before the forward pass.\n+ offload (`bool`, *optional*, defauts to `False`):\n+ Whether or not the weights should be offloaded after the forward pass.\n+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to include the associated module's buffers when offloading.\n+ module_name (`str`, *optional*, defaults to `\"\"`):\n+ The name of the module.\n+ \"\"\"\n+ # Attach the hook on this module if it has any direct tensor.\n+ directs = named_module_tensors(module)\n+ if len(list(directs)) > 0:\n+ if weights_map is not None:\n+ prefix = f\"{module_name}.\" if len(module_name) > 0 else \"\"\n+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)\n+ else:\n+ prefixed_weights_map = None\n+ hook = AlignDevicesHook(\n+ execution_device=execution_device,\n+ offload=offload,\n+ weights_map=prefixed_weights_map,\n+ offload_buffers=offload_buffers,\n+ )\n+ 
add_hook_to_module(module, hook)\n+\n+ # Recurse on all children of the module.\n+ for child_name, child in module.named_children():\n+ child_name = f\"{module_name}.{child_name}\" if len(module_name) > 0 else child_name\n+ attach_align_device_hook(\n+ child,\n+ execution_device=execution_device,\n+ offload=offload,\n+ weights_map=weights_map,\n+ offload_buffers=offload_buffers,\n+ module_name=child_name,\n+ )\n+\n+\n+def remove_hook_from_submodules(module: nn.Module):\n+ \"\"\"\n+ Recursively removes all hooks attached on the submodules of a given model.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module on which to remove all hooks.\n+ \"\"\"\n+ remove_hook_from_module(module)\n+ for child in module.children():\n+ remove_hook_from_submodules(child)\n+\n+\n+def attach_align_device_hook_on_blocks(\n+ module: nn.Module,\n+ execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,\n+ offload: Union[bool, Dict[str, bool]] = False,\n+ weights_map: Mapping = None,\n+ offload_buffers: bool = False,\n+ module_name: str = \"\",\n+):\n+ \"\"\"\n+ Attaches `AlignDevicesHook` to all blocks of a given model as needed.\n+\n+ Args:\n+ module (`torch.nn.Module`):\n+ The module where we want to attach the hooks.\n+ execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):\n+ The device on which inputs and model weights should be placed before the forward pass. It can be one device\n+ for the whole module, or a dictionary mapping module name to device.\n+ offload (`bool`, *optional*, defauts to `False`):\n+ Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole\n+ module, or a dictionary mapping module name to boolean.\n+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):\n+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to include the associated module's buffers when offloading.\n+ module_name (`str`, *optional*, defaults to `\"\"`):\n+ The name of the module.\n+ \"\"\"\n+ # If one device and one offload, we've go one hook.", "from_author": false }, { "body": "```suggestion\r\n execution_device = next(iter(model.parameters())).device\r\n```", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). 
Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device", "from_author": false }, { "body": "```suggestion\r\n execution_device = next(iter(model.parameters())).device\r\n```", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. 
As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device", "from_author": false }, { "body": "This doesn't contain the `str` which should be accepted as `auto` is valid :)\r\n```suggestion\r\n device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,\r\n```", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). 
Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that\n+ `\"disk\"` is accepted even if it's not a proper value for `torch.device`.\n+ main_device (`str`, `int` or `torch.device`, *optional*):\n+ The main execution device. 
Will default to the first device in the `device_map` different from `\"cpu\"` or\n+ `\"disk\"`.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the part of the model that will be kept on CPU.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ # Error early if the device map is incomplete.\n+ check_device_map(model, device_map)\n+\n+ if main_device is None:\n+ main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\n+\n+ cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\n+ if state_dict is None and len(cpu_modules) > 0:\n+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\n+\n+ disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\n+ if offload_dir is None and len(disk_modules) > 0:\n+ raise ValueError(\n+ \"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules \"\n+ f\"need to be offloaded: {', '.join(disk_modules)}.\"\n+ )\n+ if len(disk_modules) > 0 and (\n+ not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\"))\n+ ):\n+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)\n+ offload_state_dict(offload_dir, disk_state_dict)\n+\n+ execution_device = {\n+ name: main_device if device in [\"cpu\", \"disk\"] else device for name, device in device_map.items()\n+ }\n+ offload = {name: device in [\"cpu\", \"disk\"] for name, device in device_map.items()}\n+ if state_dict is not None or offload_dir is not None:\n+ weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=offload_dir)\n+ else:\n+ weights_map = None\n+\n+ attach_align_device_hook_on_blocks(\n+ model,\n+ execution_device=execution_device,\n+ offload=offload,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ return model\n+\n+\n+def load_checkpoint_and_dispatch(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,", "from_author": false }, { "body": "This `device_map` is super interesting for introspection inside the model. I'd be very interested in having this as an attribute of the model once it has been dispatched, for example as:\r\n```py\r\nmodel = load_checkpoint_and_dispatch(\r\n model,\r\n ...\r\n)\r\nmodel.device_map\r\n``` ", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. 
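A sketch of `dispatch_model` with a hand-written map, plus the attribute the comment above is asking for; attaching `model.device_map` manually is only the reviewer's suggestion, not something the PR does. One GPU is assumed, module names follow `nn.Sequential` numbering, and the offload folder is illustrative.

```py
import torch.nn as nn

from accelerate import dispatch_model

model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))

# Submodules "0" and "1" run on GPU 0, "2" is offloaded to disk.
device_map = {"0": 0, "1": 0, "2": "disk"}
model = dispatch_model(model, device_map=device_map, offload_dir="offload_dir")

# The reviewer's suggestion: keep the map around for later introspection.
model.device_map = device_map
```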
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). 
Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that\n+ `\"disk\"` is accepted even if it's not a proper value for `torch.device`.\n+ main_device (`str`, `int` or `torch.device`, *optional*):\n+ The main execution device. 
Will default to the first device in the `device_map` different from `\"cpu\"` or\n+ `\"disk\"`.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the part of the model that will be kept on CPU.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ # Error early if the device map is incomplete.\n+ check_device_map(model, device_map)\n+\n+ if main_device is None:\n+ main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\n+\n+ cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\n+ if state_dict is None and len(cpu_modules) > 0:\n+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\n+\n+ disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\n+ if offload_dir is None and len(disk_modules) > 0:\n+ raise ValueError(\n+ \"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules \"\n+ f\"need to be offloaded: {', '.join(disk_modules)}.\"\n+ )\n+ if len(disk_modules) > 0 and (\n+ not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\"))\n+ ):\n+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)\n+ offload_state_dict(offload_dir, disk_state_dict)\n+\n+ execution_device = {\n+ name: main_device if device in [\"cpu\", \"disk\"] else device for name, device in device_map.items()\n+ }\n+ offload = {name: device in [\"cpu\", \"disk\"] for name, device in device_map.items()}\n+ if state_dict is not None or offload_dir is not None:\n+ weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=offload_dir)\n+ else:\n+ weights_map = None\n+\n+ attach_align_device_hook_on_blocks(\n+ model,\n+ execution_device=execution_device,\n+ offload=offload,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ return model\n+\n+\n+def load_checkpoint_and_dispatch(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded and adds the various hooks that will make this model run properly (even if split across devices).\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. 
It doesn't need to be refined to each parameter/buffer\n+ name, once a given module name is inside, every submodule of it will be sent to the same device.\n+\n+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map=\"auto\"`.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU\n+ and the available CPU RAM if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as\n+ well as the parameters.\n+ \"\"\"\n+ if device_map == \"auto\":\n+ device_map = infer_auto_device_map(\n+ model, max_memory=max_memory, no_split_module_classes=no_split_module_classes\n+ )", "from_author": false }, { "body": "This is likely the place where most memory errors will occur, if the checkpoints have not been sharded. Probably out of scope for this PR, but we could add a nice error message mentioning how to shard the checkpoint if it fails here.", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
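The size-string parsing that `max_memory` handling relies on can be checked on its own; the module path `accelerate.utils.modeling` is assumed from the file added in this PR, and the expected values follow from the rules in `convert_file_size_to_int` above and `dtype_byte_size` just below.

```py
import torch

from accelerate.utils.modeling import convert_file_size_to_int, dtype_byte_size

print(convert_file_size_to_int("1MiB"))  # 1048576 (binary unit)
print(convert_file_size_to_int("5GB"))   # 5000000000 (decimal unit)
print(dtype_byte_size(torch.float16))    # 2 bytes per parameter
```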
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
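A small check of `set_module_tensor_to_device` from above (module path assumed to be `accelerate.utils.modeling`): the parameter object held by the module is rebound rather than copied, which is exactly why plain `param.to(device)` is not enough.

```py
import torch
import torch.nn as nn

from accelerate.utils.modeling import set_module_tensor_to_device

model = nn.Sequential(nn.Linear(4, 4))

# Park the weight on the meta device, then restore it with a concrete value.
set_module_tensor_to_device(model, "0.weight", "meta")
print(model[0].weight.device)  # meta

set_module_tensor_to_device(model, "0.weight", "cpu", value=torch.randn(4, 4))
print(model[0].weight.device)  # cpu
```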
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
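The tied-weight detection and size accounting above can be tried on the toy model from the `find_tied_parameters` docstring (which mixes `model` and `test_model`; a single name is used below). The module path is again assumed to be `accelerate.utils.modeling`.

```py
from collections import OrderedDict

import torch.nn as nn

from accelerate.utils.modeling import compute_module_sizes, find_tied_parameters

model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
model.linear2.weight = model.linear1.weight  # tie the two weights together

print(find_tied_parameters(model))      # {'linear1.weight': 'linear2.weight'}
print(compute_module_sizes(model)[""])  # total size in bytes; the tied weight is counted once
```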
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]\n+\n+ module_sizes = compute_module_sizes(model)\n+ tied_parameters = find_tied_parameters(model)\n+\n+ device_map = {}\n+ current_device = 0\n+ current_memory_used = 0\n+\n+ # Direct submodules and parameters\n+ modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ # Initialize maximum largest layer, to know which space to keep in memory\n+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n+\n+ # Ready ? 
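Because the allocation above is purely analytical, the map can be computed for a meta-initialized model with made-up memory caps. The caps, the layer sizes, and the presence of a GPU key `0` below are all illustrative assumptions; nothing is actually placed on a device.

```py
import torch.nn as nn

from accelerate import init_empty_weights
from accelerate.utils.modeling import infer_auto_device_map

with init_empty_weights():
    model = nn.Sequential(*[nn.Linear(2048, 2048) for _ in range(24)])  # roughly 16.8 MB per layer

# Pretend GPU 0 has 200MB to spare and the CPU 1GB: the first ~10 layers should
# land on device 0, the rest spill to "cpu", and "disk" would only be used once
# both caps (minus room for the largest layer) are exhausted.
device_map = infer_auto_device_map(model, max_memory={0: "200MB", "cpu": "1GB"})
print(device_map)
```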
This is going to be a bit messy.\n+ while len(modules_to_treat) > 0:\n+ name, module = modules_to_treat.pop(0)\n+ # Max size in the remaining layers may have changed since we took one, so we maybe update it.\n+ max_layer_names = [n for n in max_layer_names if not n.startswith(name)]\n+ if len(max_layer_names) == 0:\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ # Assess size needed\n+ module_size = module_sizes[name]\n+ tied_params = [v for k, v in tied_parameters.items() if name in k]\n+ # We ignore parameters that are tied when they're tied to > 1 one\n+ tied_param = tied_params[0] if len(tied_params) == 1 else None\n+\n+ device = devices[current_device]\n+ current_max_size = max_memory[device] if device != \"disk\" else None\n+ # Reduce max size available by the largest layer.\n+ if devices[current_device] in main_devices:\n+ current_max_size = current_max_size - max_layer_size\n+ # Case 1 -> We're too big!\n+ if current_max_size is not None and current_memory_used + module_size > current_max_size:\n+ # Split or not split?\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # -> no split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # -> split, we replace the module studied by its children + parameters\n+ modules_children = list(module.named_parameters(recurse=False)) + modules_children\n+ modules_to_treat = [(f\"{name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+\n+ # Case 2, it fits! 
We're not entirely out of the wood though, because we may have some tied parameters.\n+ elif tied_param is not None:\n+ # Determine the sized occupied by this module + the module containing the tied parameter\n+ tied_module_size = module_size\n+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]\n+ tied_module_name, tied_module = modules_to_treat[tied_module_index]\n+ tied_module_size += module_sizes[tied_module_name] - module_sizes[tied_param]\n+ if current_max_size is not None and current_memory_used + tied_module_size > current_max_size:\n+ # Split or not split?\n+ tied_module_children = list(tied_module.named_children())\n+ if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:\n+ # If the tied module is not split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # Otherwise, we replace the tied module by its children.\n+ tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children\n+ tied_module_children = [(f\"{tied_module_name}.{n}\", v) for n, v in tied_module_children]\n+ modules_to_treat = (\n+ [(name, module)]\n+ + modules_to_treat[:tied_module_index]\n+ + tied_module_children\n+ + modules_to_treat[tied_module_index + 1 :]\n+ )\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ else:\n+ # We really really fit!\n+ current_memory_used += tied_module_size\n+ device_map[name] = devices[current_device]\n+ modules_to_treat.pop(tied_module_index)\n+ device_map[tied_module_name] = devices[current_device]\n+ else:\n+ current_memory_used += module_size\n+ device_map[name] = devices[current_device]\n+\n+ return clean_device_map(device_map)\n+\n+\n+def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):\n+ \"\"\"\n+ Checks a device map covers everything in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to check the device map against.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.\n+ \"\"\"\n+ all_model_tensors = [name for name, _ in model.state_dict().items()]\n+ for module_name in device_map.keys():\n+ all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)]\n+ if len(all_model_tensors) > 0:\n+ non_covered_params = \", \".join(all_model_tensors)\n+ raise ValueError(\n+ f\"The device_map provided does not give any device for the following parameters: {non_covered_params}\"\n+ )\n+\n+\n+def load_checkpoint_in_model(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded.\n+\n+ <Tip warning={true}>\n+\n+ Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To\n+ group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. 
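A quick illustration of the coverage check above; `check_device_map` is imported from `accelerate.utils`, matching the import block at the top of this PR's `big_modeling.py`.

```py
import torch.nn as nn

from accelerate.utils import check_device_map

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))

check_device_map(model, {"0": 0, "1": "cpu"})  # fine: every parameter is covered

try:
    check_device_map(model, {"0": 0})          # "1.weight" and "1.bias" are not covered
except ValueError as e:
    print(e)
```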
It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer\n+ name, once a given module name is inside, every submodule of it will be sent to the same device.\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ \"\"\"\n+ if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n+ raise ValueError(\n+ \"At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`.\"\n+ )\n+\n+ checkpoint_files = None\n+ index_filename = None\n+ if os.path.isfile(checkpoint):\n+ if str(checkpoint).endswith(\".json\"):\n+ index_filename = checkpoint\n+ else:\n+ checkpoint_files = [checkpoint]\n+ elif os.path.isdir(checkpoint):\n+ potential_index = [f for f in os.listdir(checkpoint) if f.endswith(\".index.json\")]\n+ if len(potential_index) == 0:\n+ raise ValueError(f\"{checkpoint} is not a folder containing a `.index.json` file.\")\n+ elif len(potential_index) == 1:\n+ index_filename = os.path.join(checkpoint, potential_index[0])\n+ else:\n+ raise ValueError(f\"{checkpoint} containing mote than one `.index.json` file, delete the irrelevant ones.\")\n+ else:\n+ raise ValueError(\n+ \"`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded \"\n+ f\"checkpoint, or a folder containing a sharded checkpoint, but got {checkpoint}.\"\n+ )\n+\n+ if index_filename is not None:\n+ checkpoint_folder = os.path.split(index_filename)[0]\n+ with open(index_filename, \"r\") as f:\n+ index = json.loads(f.read())\n+\n+ if \"weight_map\" in index:\n+ index = index[\"weight_map\"]\n+ checkpoint_files = sorted(list(set(index.values())))\n+ checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]\n+\n+ # Logic for missing/unexepected keys goes here.\n+\n+ offload_index = {}\n+ for checkpoint_file in checkpoint_files:\n+ checkpoint = torch.load(checkpoint_file)", "from_author": false }, { "body": "Should this default to `auto`? 
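The folder branch above expects exactly one `*.index.json` next to the shards. A hand-built sharded checkpoint makes the lookup concrete; the shard file names are arbitrary, only the `.index.json` suffix and the `weight_map` key matter, and with no `device_map` the shards are simply loaded into the model one after the other.

```py
import json
import os

import torch
import torch.nn as nn

from accelerate.utils import load_checkpoint_in_model

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))
state = model.state_dict()

# Two shards plus the index mapping each weight name to its shard file.
os.makedirs("checkpoint", exist_ok=True)
torch.save({k: v for k, v in state.items() if k.startswith("0.")}, "checkpoint/shard-1.bin")
torch.save({k: v for k, v in state.items() if k.startswith("1.")}, "checkpoint/shard-2.bin")
index = {"weight_map": {k: "shard-1.bin" if k.startswith("0.") else "shard-2.bin" for k in state}}
with open("checkpoint/pytorch_model.bin.index.json", "w") as f:
    json.dump(index, f)

# The unique `.index.json` is found in the folder and both shards are loaded.
load_checkpoint_in_model(model, "checkpoint")
```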
Right now it's optional, but not passing it results in an error:\r\n```\r\nraceback (most recent call last):\r\n File \"/home/lysandre/Workspaces/Python/transformers/.env/lib/python3.9/site-packages/IPython/core/interactiveshell.py\", line 3397, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"<ipython-input-2-f756f1b4a3d4>\", line 3, in <cell line: 3>\r\n runfile('/home/lysandre/.config/JetBrains/PyCharm2021.3/scratches/OPT/playground2.py', wdir='/home/lysandre/.config/JetBrains/PyCharm2021.3/scratches/OPT')\r\n File \"/opt/pycharm-professional/plugins/python/helpers/pydev/_pydev_bundle/pydev_umd.py\", line 198, in runfile\r\n pydev_imports.execfile(filename, global_vars, local_vars) # execute the script\r\n File \"/opt/pycharm-professional/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py\", line 18, in execfile\r\n exec(compile(contents+\"\\n\", file, 'exec'), glob, loc)\r\n File \"/home/lysandre/.config/JetBrains/PyCharm2021.3/scratches/OPT/playground2.py\", line 8, in <module>\r\n model = load_checkpoint_and_dispatch(\r\n File \"/home/lysandre/Workspaces/Python/accelerate/src/accelerate/big_modeling.py\", line 260, in load_checkpoint_and_dispatch\r\n return dispatch_model(model, device_map=device_map, offload_dir=offload_folder, offload_buffers=offload_buffers)\r\n File \"/home/lysandre/Workspaces/Python/accelerate/src/accelerate/big_modeling.py\", line 178, in dispatch_model\r\n check_device_map(model, device_map)\r\n File \"/home/lysandre/Workspaces/Python/accelerate/src/accelerate/utils/modeling.py\", line 451, in check_device_map\r\n for module_name in device_map.keys():\r\nAttributeError: 'NoneType' object has no attribute 'keys'\r\n```", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. 
Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. 
During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that\n+ `\"disk\"` is accepted even if it's not a proper value for `torch.device`.\n+ main_device (`str`, `int` or `torch.device`, *optional*):\n+ The main execution device. 
Will default to the first device in the `device_map` different from `\"cpu\"` or\n+ `\"disk\"`.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the part of the model that will be kept on CPU.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ # Error early if the device map is incomplete.\n+ check_device_map(model, device_map)\n+\n+ if main_device is None:\n+ main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\n+\n+ cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\n+ if state_dict is None and len(cpu_modules) > 0:\n+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\n+\n+ disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\n+ if offload_dir is None and len(disk_modules) > 0:\n+ raise ValueError(\n+ \"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules \"\n+ f\"need to be offloaded: {', '.join(disk_modules)}.\"\n+ )\n+ if len(disk_modules) > 0 and (\n+ not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\"))\n+ ):\n+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)\n+ offload_state_dict(offload_dir, disk_state_dict)\n+\n+ execution_device = {\n+ name: main_device if device in [\"cpu\", \"disk\"] else device for name, device in device_map.items()\n+ }\n+ offload = {name: device in [\"cpu\", \"disk\"] for name, device in device_map.items()}\n+ if state_dict is not None or offload_dir is not None:\n+ weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=offload_dir)\n+ else:\n+ weights_map = None\n+\n+ attach_align_device_hook_on_blocks(\n+ model,\n+ execution_device=execution_device,\n+ offload=offload,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ return model\n+\n+\n+def load_checkpoint_and_dispatch(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,", "from_author": false }, { "body": "`max_memory` is a dictionary, which is unordered in python <= 3.6.\r\n\r\nHere, we're relying on the ordering of the dictionary in order to retrieve the `main_devices`. I'm open to hearing what you think; I would push to always consider dictionaries as unordered and to treat them as such, but as we're dropping python 3.6 I understand if you want to have the asumption that dictionaries are ordered.\r\n\r\nThese lists could likely be `set`s however: `devices` should theoretically not have the same device twice, and the same holds for `main_devices`.\r\n\r\nIf no GPU is available, then `devices` is a list containing `[\"cpu\", \"disk\"]`. In that case, we end up with `main_devices` containing `['cpu', 'cpu']`.", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. 
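The `['cpu', 'cpu']` case from the comment above is easy to reproduce on a CPU-only machine, and an order-preserving de-duplication is one way to act on the suggestion; this is only a sketch of the reviewer's idea, not what the PR currently does.

```py
from accelerate.utils.modeling import get_max_memory

# With no GPU visible, get_max_memory() only reports the CPU, so:
devices = list(get_max_memory().keys()) + ["disk"]  # ["cpu", "disk"]
main_devices = [devices[0], "cpu"]                  # ["cpu", "cpu"]  <- the duplicate

# An order-preserving de-dup keeps the priority of the first device:
main_devices = list(dict.fromkeys([devices[0], "cpu"]))  # ["cpu"]
```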
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]", "from_author": false }, { "body": "If I understand this correctly, this will return an empty device map if everything fits on a single device. If it receives the following device map, for example:\r\n```py\r\nclean_device_map({'transformer': 0, 'lm_head': 0})\r\n```\r\nit will return the following:\r\n```\r\n{'': 0}\r\n```\r\nwhich, unfortunately, cannot be digested by the code later on as keys cannot find a common radix", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. 
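(Editor's aside, not part of the PR thread.) The `{'': 0}` behavior reported in the comment above is easy to reproduce: the sketch below copies the grouping logic of `clean_device_map` from the diff under review and only adds the imports and the final `print`, so it is an illustration of the reported collapse rather than the library's shipped API.

```py
from typing import Dict, Union


def clean_device_map(device_map: Dict[str, Union[int, str]], module_name: str = "") -> Dict[str, Union[int, str]]:
    # If every key under `module_name` maps to the same device, collapse them into one entry.
    values = [v for k, v in device_map.items() if k.startswith(module_name)]
    if len(set(values)) == 1 and len(values) > 1:
        for k in [k for k in device_map if k.startswith(module_name)]:
            del device_map[k]
        device_map[module_name] = values[0]

    # Recurse over the direct children of `module_name`.
    children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]
    idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1
    children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules)
    for child in children_modules:
        clean_device_map(device_map, module_name=child)
    return device_map


print(clean_device_map({"transformer": 0, "lm_head": 0}))  # prints {'': 0}: the whole model collapses to the root key
```

Since `module_name` defaults to the empty string, every key matches the initial `startswith` check, so a map whose values are all identical is folded into the single root entry the reviewer points out.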
Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of 
a given module. If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]", "from_author": false }, { "body": "Nitpick: this fails right now if we specify a checkpoint like we would do in the transformers library. For example, this fails:\r\n\r\n```py\r\nwith init_empty_weights():\r\n model = GPT2LMHeadModel.from_pretrained('./gpt2')\r\n\r\nmodel = load_checkpoint_and_dispatch(\r\n model,\r\n \"./gpt2\",\r\n device_map='auto'\r\n)\r\n```\r\n\r\nwhereas intuitively I would expect it to work. I understand if that's wanted behavior and that passing the `pytorch_model.bin` is what you want instead.", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]\n+\n+ module_sizes = compute_module_sizes(model)\n+ tied_parameters = find_tied_parameters(model)\n+\n+ device_map = {}\n+ current_device = 0\n+ current_memory_used = 0\n+\n+ # Direct submodules and parameters\n+ modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ # Initialize maximum largest layer, to know which space to keep in memory\n+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n+\n+ # Ready ? 
This is going to be a bit messy.\n+ while len(modules_to_treat) > 0:\n+ name, module = modules_to_treat.pop(0)\n+ # Max size in the remaining layers may have changed since we took one, so we maybe update it.\n+ max_layer_names = [n for n in max_layer_names if not n.startswith(name)]\n+ if len(max_layer_names) == 0:\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ # Assess size needed\n+ module_size = module_sizes[name]\n+ tied_params = [v for k, v in tied_parameters.items() if name in k]\n+ # We ignore parameters that are tied when they're tied to > 1 one\n+ tied_param = tied_params[0] if len(tied_params) == 1 else None\n+\n+ device = devices[current_device]\n+ current_max_size = max_memory[device] if device != \"disk\" else None\n+ # Reduce max size available by the largest layer.\n+ if devices[current_device] in main_devices:\n+ current_max_size = current_max_size - max_layer_size\n+ # Case 1 -> We're too big!\n+ if current_max_size is not None and current_memory_used + module_size > current_max_size:\n+ # Split or not split?\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # -> no split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # -> split, we replace the module studied by its children + parameters\n+ modules_children = list(module.named_parameters(recurse=False)) + modules_children\n+ modules_to_treat = [(f\"{name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+\n+ # Case 2, it fits! 
We're not entirely out of the wood though, because we may have some tied parameters.\n+ elif tied_param is not None:\n+ # Determine the sized occupied by this module + the module containing the tied parameter\n+ tied_module_size = module_size\n+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]\n+ tied_module_name, tied_module = modules_to_treat[tied_module_index]\n+ tied_module_size += module_sizes[tied_module_name] - module_sizes[tied_param]\n+ if current_max_size is not None and current_memory_used + tied_module_size > current_max_size:\n+ # Split or not split?\n+ tied_module_children = list(tied_module.named_children())\n+ if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:\n+ # If the tied module is not split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # Otherwise, we replace the tied module by its children.\n+ tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children\n+ tied_module_children = [(f\"{tied_module_name}.{n}\", v) for n, v in tied_module_children]\n+ modules_to_treat = (\n+ [(name, module)]\n+ + modules_to_treat[:tied_module_index]\n+ + tied_module_children\n+ + modules_to_treat[tied_module_index + 1 :]\n+ )\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ else:\n+ # We really really fit!\n+ current_memory_used += tied_module_size\n+ device_map[name] = devices[current_device]\n+ modules_to_treat.pop(tied_module_index)\n+ device_map[tied_module_name] = devices[current_device]\n+ else:\n+ current_memory_used += module_size\n+ device_map[name] = devices[current_device]\n+\n+ return clean_device_map(device_map)\n+\n+\n+def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):\n+ \"\"\"\n+ Checks a device map covers everything in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to check the device map against.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.\n+ \"\"\"\n+ all_model_tensors = [name for name, _ in model.state_dict().items()]\n+ for module_name in device_map.keys():\n+ all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)]\n+ if len(all_model_tensors) > 0:\n+ non_covered_params = \", \".join(all_model_tensors)\n+ raise ValueError(\n+ f\"The device_map provided does not give any device for the following parameters: {non_covered_params}\"\n+ )\n+\n+\n+def load_checkpoint_in_model(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded.\n+\n+ <Tip warning={true}>\n+\n+ Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To\n+ group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. 
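(Editor's aside, not part of the PR thread.) For readers following the tied-parameter bookkeeping quoted earlier in this hunk, here is a minimal plain-PyTorch setup of the situation it handles. It mirrors the docstring example of `find_tied_parameters` from the diff, with the `model`/`test_model` naming made consistent; the expected return value in the comment is taken from that docstring, not re-run against any released version of the library.

```py
from collections import OrderedDict

import torch.nn as nn

model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
model.linear2.weight = model.linear1.weight          # tie the two weights together

print(model.linear2.weight is model.linear1.weight)  # True: a single shared Parameter object

# `model.named_parameters()` deduplicates shared parameters, so the tied copy only shows up
# in `model.linear2.named_parameters()`. That asymmetry is what `find_tied_parameters` in this
# diff relies on, and its docstring expects:
#     find_tied_parameters(model) -> {"linear1.weight": "linear2.weight"}
```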
It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer\n+ name, once a given module name is inside, every submodule of it will be sent to the same device.\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ \"\"\"\n+ if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n+ raise ValueError(\n+ \"At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`.\"\n+ )\n+\n+ checkpoint_files = None\n+ index_filename = None\n+ if os.path.isfile(checkpoint):\n+ if str(checkpoint).endswith(\".json\"):\n+ index_filename = checkpoint\n+ else:\n+ checkpoint_files = [checkpoint]\n+ elif os.path.isdir(checkpoint):", "from_author": false }, { "body": "If you want a reproducible code example, I've tried with `gpt2` which I can load entirely in my model's memory. For example:\r\n\r\n```py\r\nfrom accelerate import init_empty_weights, load_checkpoint_and_dispatch\r\nfrom transformers import GPT2LMHeadModel\r\nimport torch\r\n\r\nGPT2LMHeadModel.from_pretrained('gpt2').save_pretrained('./gpt2')\r\n\r\nwith init_empty_weights():\r\n model = GPT2LMHeadModel.from_pretrained('gpt2')\r\n\r\nmodel = load_checkpoint_and_dispatch(\r\n model,\r\n \"./gpt2/pytorch_model.bin\",\r\n device_map='auto'\r\n)\r\n\r\n```", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. 
Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of 
a given module. If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]", "from_author": false }, { "body": "This and the code below tells me that it may not have been made so that `cpu` is a `main_device` :smile: \r\nI'll stop bugging you with it.", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). 
Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that\n+ `\"disk\"` is accepted even if it's not a proper value for `torch.device`.\n+ main_device (`str`, `int` or `torch.device`, *optional*):\n+ The main execution device. 
Will default to the first device in the `device_map` different from `\"cpu\"` or\n+ `\"disk\"`.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the part of the model that will be kept on CPU.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ # Error early if the device map is incomplete.\n+ check_device_map(model, device_map)\n+\n+ if main_device is None:\n+ main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]", "from_author": false }, { "body": "Since buffers are way smaller in most cases, I've ignored them in everything by default.", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.", "from_author": true }, { "body": "Good catch!", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). 
Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device", "from_author": true }, { "body": "I'm not completely following. If we are treating the layer with the maximum number of parameters, it will be taken into account as `module_size` later on. We thus need to consider the largest layer remaining in the model (so second largest)", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]\n+\n+ module_sizes = compute_module_sizes(model)\n+ tied_parameters = find_tied_parameters(model)\n+\n+ device_map = {}\n+ current_device = 0\n+ current_memory_used = 0\n+\n+ # Direct submodules and parameters\n+ modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ # Initialize maximum largest layer, to know which space to keep in memory\n+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n+\n+ # Ready ? This is going to be a bit messy.\n+ while len(modules_to_treat) > 0:\n+ name, module = modules_to_treat.pop(0)\n+ # Max size in the remaining layers may have changed since we took one, so we maybe update it.\n+ max_layer_names = [n for n in max_layer_names if not n.startswith(name)]\n+ if len(max_layer_names) == 0:\n+ max_layer_size, max_layer_names = get_max_layer_size(", "from_author": true }, { "body": "Assuming the modeling file is called `pytorch_model.bin` is very Transformers-centric. For Accelerate, I don't want to make this assumption.", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]\n+\n+ module_sizes = compute_module_sizes(model)\n+ tied_parameters = find_tied_parameters(model)\n+\n+ device_map = {}\n+ current_device = 0\n+ current_memory_used = 0\n+\n+ # Direct submodules and parameters\n+ modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ # Initialize maximum largest layer, to know which space to keep in memory\n+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n+\n+ # Ready ? 
This is going to be a bit messy.\n+ while len(modules_to_treat) > 0:\n+ name, module = modules_to_treat.pop(0)\n+ # Max size in the remaining layers may have changed since we took one, so we maybe update it.\n+ max_layer_names = [n for n in max_layer_names if not n.startswith(name)]\n+ if len(max_layer_names) == 0:\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ # Assess size needed\n+ module_size = module_sizes[name]\n+ tied_params = [v for k, v in tied_parameters.items() if name in k]\n+ # We ignore parameters that are tied when they're tied to > 1 one\n+ tied_param = tied_params[0] if len(tied_params) == 1 else None\n+\n+ device = devices[current_device]\n+ current_max_size = max_memory[device] if device != \"disk\" else None\n+ # Reduce max size available by the largest layer.\n+ if devices[current_device] in main_devices:\n+ current_max_size = current_max_size - max_layer_size\n+ # Case 1 -> We're too big!\n+ if current_max_size is not None and current_memory_used + module_size > current_max_size:\n+ # Split or not split?\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # -> no split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # -> split, we replace the module studied by its children + parameters\n+ modules_children = list(module.named_parameters(recurse=False)) + modules_children\n+ modules_to_treat = [(f\"{name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+\n+ # Case 2, it fits! 
We're not entirely out of the wood though, because we may have some tied parameters.\n+ elif tied_param is not None:\n+ # Determine the sized occupied by this module + the module containing the tied parameter\n+ tied_module_size = module_size\n+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]\n+ tied_module_name, tied_module = modules_to_treat[tied_module_index]\n+ tied_module_size += module_sizes[tied_module_name] - module_sizes[tied_param]\n+ if current_max_size is not None and current_memory_used + tied_module_size > current_max_size:\n+ # Split or not split?\n+ tied_module_children = list(tied_module.named_children())\n+ if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:\n+ # If the tied module is not split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # Otherwise, we replace the tied module by its children.\n+ tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children\n+ tied_module_children = [(f\"{tied_module_name}.{n}\", v) for n, v in tied_module_children]\n+ modules_to_treat = (\n+ [(name, module)]\n+ + modules_to_treat[:tied_module_index]\n+ + tied_module_children\n+ + modules_to_treat[tied_module_index + 1 :]\n+ )\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ else:\n+ # We really really fit!\n+ current_memory_used += tied_module_size\n+ device_map[name] = devices[current_device]\n+ modules_to_treat.pop(tied_module_index)\n+ device_map[tied_module_name] = devices[current_device]\n+ else:\n+ current_memory_used += module_size\n+ device_map[name] = devices[current_device]\n+\n+ return clean_device_map(device_map)\n+\n+\n+def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):\n+ \"\"\"\n+ Checks a device map covers everything in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to check the device map against.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.\n+ \"\"\"\n+ all_model_tensors = [name for name, _ in model.state_dict().items()]\n+ for module_name in device_map.keys():\n+ all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)]\n+ if len(all_model_tensors) > 0:\n+ non_covered_params = \", \".join(all_model_tensors)\n+ raise ValueError(\n+ f\"The device_map provided does not give any device for the following parameters: {non_covered_params}\"\n+ )\n+\n+\n+def load_checkpoint_in_model(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded.\n+\n+ <Tip warning={true}>\n+\n+ Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To\n+ group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. 
It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer\n+ name, once a given module name is inside, every submodule of it will be sent to the same device.\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ \"\"\"\n+ if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n+ raise ValueError(\n+ \"At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`.\"\n+ )\n+\n+ checkpoint_files = None\n+ index_filename = None\n+ if os.path.isfile(checkpoint):\n+ if str(checkpoint).endswith(\".json\"):\n+ index_filename = checkpoint\n+ else:\n+ checkpoint_files = [checkpoint]\n+ elif os.path.isdir(checkpoint):", "from_author": true }, { "body": "Sounds good, will likely become very intuitive after implementing it in `transformers` so happy to keep it this way :+1: ", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]\n+\n+ module_sizes = compute_module_sizes(model)\n+ tied_parameters = find_tied_parameters(model)\n+\n+ device_map = {}\n+ current_device = 0\n+ current_memory_used = 0\n+\n+ # Direct submodules and parameters\n+ modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ # Initialize maximum largest layer, to know which space to keep in memory\n+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n+\n+ # Ready ? 
This is going to be a bit messy.\n+ while len(modules_to_treat) > 0:\n+ name, module = modules_to_treat.pop(0)\n+ # Max size in the remaining layers may have changed since we took one, so we maybe update it.\n+ max_layer_names = [n for n in max_layer_names if not n.startswith(name)]\n+ if len(max_layer_names) == 0:\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ # Assess size needed\n+ module_size = module_sizes[name]\n+ tied_params = [v for k, v in tied_parameters.items() if name in k]\n+ # We ignore parameters that are tied when they're tied to > 1 one\n+ tied_param = tied_params[0] if len(tied_params) == 1 else None\n+\n+ device = devices[current_device]\n+ current_max_size = max_memory[device] if device != \"disk\" else None\n+ # Reduce max size available by the largest layer.\n+ if devices[current_device] in main_devices:\n+ current_max_size = current_max_size - max_layer_size\n+ # Case 1 -> We're too big!\n+ if current_max_size is not None and current_memory_used + module_size > current_max_size:\n+ # Split or not split?\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # -> no split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # -> split, we replace the module studied by its children + parameters\n+ modules_children = list(module.named_parameters(recurse=False)) + modules_children\n+ modules_to_treat = [(f\"{name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+\n+ # Case 2, it fits! 
We're not entirely out of the wood though, because we may have some tied parameters.\n+ elif tied_param is not None:\n+ # Determine the sized occupied by this module + the module containing the tied parameter\n+ tied_module_size = module_size\n+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]\n+ tied_module_name, tied_module = modules_to_treat[tied_module_index]\n+ tied_module_size += module_sizes[tied_module_name] - module_sizes[tied_param]\n+ if current_max_size is not None and current_memory_used + tied_module_size > current_max_size:\n+ # Split or not split?\n+ tied_module_children = list(tied_module.named_children())\n+ if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:\n+ # If the tied module is not split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # Otherwise, we replace the tied module by its children.\n+ tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children\n+ tied_module_children = [(f\"{tied_module_name}.{n}\", v) for n, v in tied_module_children]\n+ modules_to_treat = (\n+ [(name, module)]\n+ + modules_to_treat[:tied_module_index]\n+ + tied_module_children\n+ + modules_to_treat[tied_module_index + 1 :]\n+ )\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ else:\n+ # We really really fit!\n+ current_memory_used += tied_module_size\n+ device_map[name] = devices[current_device]\n+ modules_to_treat.pop(tied_module_index)\n+ device_map[tied_module_name] = devices[current_device]\n+ else:\n+ current_memory_used += module_size\n+ device_map[name] = devices[current_device]\n+\n+ return clean_device_map(device_map)\n+\n+\n+def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):\n+ \"\"\"\n+ Checks a device map covers everything in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to check the device map against.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.\n+ \"\"\"\n+ all_model_tensors = [name for name, _ in model.state_dict().items()]\n+ for module_name in device_map.keys():\n+ all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)]\n+ if len(all_model_tensors) > 0:\n+ non_covered_params = \", \".join(all_model_tensors)\n+ raise ValueError(\n+ f\"The device_map provided does not give any device for the following parameters: {non_covered_params}\"\n+ )\n+\n+\n+def load_checkpoint_in_model(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded.\n+\n+ <Tip warning={true}>\n+\n+ Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To\n+ group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. 
It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer\n+ name, once a given module name is inside, every submodule of it will be sent to the same device.\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ \"\"\"\n+ if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n+ raise ValueError(\n+ \"At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`.\"\n+ )\n+\n+ checkpoint_files = None\n+ index_filename = None\n+ if os.path.isfile(checkpoint):\n+ if str(checkpoint).endswith(\".json\"):\n+ index_filename = checkpoint\n+ else:\n+ checkpoint_files = [checkpoint]\n+ elif os.path.isdir(checkpoint):", "from_author": false }, { "body": "I will store it in `model.hf_device_map` to avoid conflicts with potential existing `device_map`.", "diff_hunk": "@@ -0,0 +1,260 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+from typing import Dict, List, Optional, Union\n+\n+import torch\n+import torch.nn as nn\n+\n+from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks\n+from .utils import (\n+ OffloadedWeightsLoader,\n+ check_device_map,\n+ extract_submodules_state_dict,\n+ infer_auto_device_map,\n+ load_checkpoint_in_model,\n+ offload_state_dict,\n+)\n+\n+\n+@contextmanager\n+def init_empty_weights(include_buffers: bool = False):\n+ \"\"\"\n+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an\n+ empty model. 
Useful when just initializing the model would blow the available RAM.\n+\n+ Args:\n+ include_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to also put all buffers on the meta device while initializing.\n+\n+ Example:\n+\n+ ```pyton\n+ import torch.nn as nn\n+ from accelerate import init_empty_weights\n+\n+ # Initialize a model with 100 billions parameters in no time and without using any RAM.\n+ with init_empty_weights():\n+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n+ ```\n+ \"\"\"\n+ old_register_parameter = nn.Module.register_parameter\n+ if include_buffers:\n+ old_register_buffer = nn.Module.register_buffer\n+\n+ def register_empty_parameter(module, name, param):\n+ old_register_parameter(module, name, param)\n+ if param is not None:\n+ module._parameters[name] = nn.Parameter(module._parameters[name].to(torch.device(\"meta\")))\n+\n+ def register_empty_buffer(module, name, buffer):\n+ old_register_buffer(module, name, buffer)\n+ if buffer is not None:\n+ module._buffers[name] = module._buffers[name].to(torch.device(\"meta\"))\n+\n+ try:\n+ nn.Module.register_parameter = register_empty_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = register_empty_buffer\n+ yield\n+ finally:\n+ nn.Module.register_parameter = old_register_parameter\n+ if include_buffers:\n+ nn.Module.register_buffer = old_register_buffer\n+\n+\n+def cpu_offload(\n+ model: nn.Module,\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+):\n+ \"\"\"\n+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one\n+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that\n+ state dict and put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to offload.\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the model that will be kept on CPU.\n+ \"\"\"\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ if state_dict is None:\n+ state_dict = {n: p.to(\"cpu\") for n, p in model.state_dict().items()}\n+ attach_align_device_hook(\n+ model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def disk_offload(\n+ model: nn.Module,\n+ offload_dir: Union[str, os.PathLike],\n+ execution_device: Optional[torch.device] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as\n+ memory-mapped array in a given folder. 
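A minimal sketch of how the `cpu_offload` helper above might be called, assuming it is exported at the package root and that a CUDA GPU is available; the toy model is only for illustration.

```py
import torch
import torch.nn as nn
from accelerate import cpu_offload  # assumed import path

model = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 512))

# One CPU copy of the state dict is kept; the hooks stream each submodule's weights
# to the GPU during the forward pass and release them again afterwards.
model = cpu_offload(model, execution_device=torch.device("cuda:0"))

# With the io_same_device hook added above, the output comes back on the input's device.
output = model(torch.randn(4, 512))
```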
During the forward pass, parameters will be accessed from that folder and\n+ put on the execution device passed as they are needed, then offloaded again.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to offload.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ execution_device (`torch.device`, *optional*):\n+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the\n+ model's first parameter device.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\")):\n+ offload_state_dict(offload_dir, model.state_dict())\n+ if execution_device is None:\n+ execution_device = next(iter(model.parameter())).device\n+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)\n+ attach_align_device_hook(\n+ model,\n+ execution_device=execution_device,\n+ offload=True,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True))\n+ return model\n+\n+\n+def dispatch_model(\n+ model: nn.Module,\n+ device_map: Dict[str, Union[str, int, torch.device]],\n+ main_device: Optional[torch.device] = None,\n+ state_dict: Optional[Dict[str, torch.Tensor]] = None,\n+ offload_dir: Union[str, os.PathLike] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on\n+ the CPU or even the disk.\n+\n+ Args:\n+ model (`torch.nn.Module`):\n+ The model to dispatch.\n+ device_map (`Dict[str, Union[str, int, torch.device]]`):\n+ A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that\n+ `\"disk\"` is accepted even if it's not a proper value for `torch.device`.\n+ main_device (`str`, `int` or `torch.device`, *optional*):\n+ The main execution device. 
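Similarly, a hedged sketch of `disk_offload` as defined above; the offload directory name is a placeholder and the import path is an assumption.

```py
import torch
import torch.nn as nn
from accelerate import disk_offload  # assumed import path

model = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 512))

# The state dict is written once to the folder (memory-mapped arrays plus an index.json),
# then read back piece by piece during each forward pass.
execution_device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
model = disk_offload(model, offload_dir="model_offload", execution_device=execution_device)

output = model(torch.randn(4, 512))
```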
Will default to the first device in the `device_map` different from `\"cpu\"` or\n+ `\"disk\"`.\n+ state_dict (`Dict[str, torch.Tensor]`, *optional*):\n+ The state dict of the part of the model that will be kept on CPU.\n+ offload_dir (`str` or `os.PathLike`):\n+ The folder in which to offload the model weights (or where the model weights are already offloaded).\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ Whether or not to offload the buffers with the model parameters.\n+ \"\"\"\n+ # Error early if the device map is incomplete.\n+ check_device_map(model, device_map)\n+\n+ if main_device is None:\n+ main_device = [d for d in device_map.values() if d not in [\"cpu\", \"disk\"]][0]\n+\n+ cpu_modules = [name for name, device in device_map.items() if device == \"cpu\"]\n+ if state_dict is None and len(cpu_modules) > 0:\n+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)\n+\n+ disk_modules = [name for name, device in device_map.items() if device == \"disk\"]\n+ if offload_dir is None and len(disk_modules) > 0:\n+ raise ValueError(\n+ \"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules \"\n+ f\"need to be offloaded: {', '.join(disk_modules)}.\"\n+ )\n+ if len(disk_modules) > 0 and (\n+ not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, \"index.json\"))\n+ ):\n+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)\n+ offload_state_dict(offload_dir, disk_state_dict)\n+\n+ execution_device = {\n+ name: main_device if device in [\"cpu\", \"disk\"] else device for name, device in device_map.items()\n+ }\n+ offload = {name: device in [\"cpu\", \"disk\"] for name, device in device_map.items()}\n+ if state_dict is not None or offload_dir is not None:\n+ weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=offload_dir)\n+ else:\n+ weights_map = None\n+\n+ attach_align_device_hook_on_blocks(\n+ model,\n+ execution_device=execution_device,\n+ offload=offload,\n+ offload_buffers=offload_buffers,\n+ weights_map=weights_map,\n+ )\n+ return model\n+\n+\n+def load_checkpoint_and_dispatch(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+ offload_buffers: bool = False,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded and adds the various hooks that will make this model run properly (even if split across devices).\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. 
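A sketch of `dispatch_model` with an explicit map, assuming two CUDA GPUs and a package-root import; the submodule names stand in for a real model's top-level blocks (with `device_map="auto"`, the map would instead come from `infer_auto_device_map`).

```py
from collections import OrderedDict

import torch.nn as nn
from accelerate import dispatch_model  # assumed import path

model = nn.Sequential(OrderedDict([
    ("encoder", nn.Linear(256, 256)),
    ("decoder", nn.Linear(256, 256)),
    ("lm_head", nn.Linear(256, 16)),
]))

device_map = {
    "encoder": 0,      # stays on GPU 0, which also becomes the main device
    "decoder": 1,      # stays on GPU 1
    "lm_head": "cpu",  # kept in RAM and streamed to the main device when used
}
model = dispatch_model(model, device_map=device_map)
# Any "disk" entry would additionally require an offload_dir, as the check above enforces.
```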
It doesn't need to be refined to each parameter/buffer\n+ name, once a given module name is inside, every submodule of it will be sent to the same device.\n+\n+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map=\"auto\"`.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU\n+ and the available CPU RAM if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ offload_buffers (`bool`, *optional*, defaults to `False`):\n+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as\n+ well as the parameters.\n+ \"\"\"\n+ if device_map == \"auto\":\n+ device_map = infer_auto_device_map(\n+ model, max_memory=max_memory, no_split_module_classes=no_split_module_classes\n+ )", "from_author": true }, { "body": "We can add more defensive checks indeed, in new PRs.", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
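A tiny illustration of `set_module_tensor_to_device` from this hunk, assuming the `accelerate.utils` import path; the point is that the helper re-registers the tensor on the module, which is what allows a meta-device parameter to be filled with a real value later.

```py
import torch
import torch.nn as nn
from accelerate.utils import set_module_tensor_to_device  # assumed import path

linear = nn.Linear(4, 4)

# Replaces the registered parameter in place rather than creating a detached copy.
set_module_tensor_to_device(linear, "weight", "cpu", value=torch.ones(4, 4))
print(linear.weight.device, linear.weight.sum().item())  # cpu 16.0
```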
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
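A short sketch exercising the two inspection helpers above on a toy model with a tied weight; the import paths are assumptions.

```py
from collections import OrderedDict

import torch.nn as nn
from accelerate.utils import compute_module_sizes, find_tied_parameters  # assumed import paths

model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
model.linear2.weight = model.linear1.weight  # tie the two weights

print(find_tied_parameters(model))
# {'linear1.weight': 'linear2.weight'}

print(compute_module_sizes(model)["linear1"])
# 80 bytes: a 4x4 float32 weight (64) plus a 4-element bias (16)
```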
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]\n+\n+ module_sizes = compute_module_sizes(model)\n+ tied_parameters = find_tied_parameters(model)\n+\n+ device_map = {}\n+ current_device = 0\n+ current_memory_used = 0\n+\n+ # Direct submodules and parameters\n+ modules_to_treat = list(model.named_parameters(recurse=False)) + list(model.named_children())\n+ # Initialize maximum largest layer, to know which space to keep in memory\n+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)\n+\n+ # Ready ? 
This is going to be a bit messy.\n+ while len(modules_to_treat) > 0:\n+ name, module = modules_to_treat.pop(0)\n+ # Max size in the remaining layers may have changed since we took one, so we maybe update it.\n+ max_layer_names = [n for n in max_layer_names if not n.startswith(name)]\n+ if len(max_layer_names) == 0:\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ # Assess size needed\n+ module_size = module_sizes[name]\n+ tied_params = [v for k, v in tied_parameters.items() if name in k]\n+ # We ignore parameters that are tied when they're tied to > 1 one\n+ tied_param = tied_params[0] if len(tied_params) == 1 else None\n+\n+ device = devices[current_device]\n+ current_max_size = max_memory[device] if device != \"disk\" else None\n+ # Reduce max size available by the largest layer.\n+ if devices[current_device] in main_devices:\n+ current_max_size = current_max_size - max_layer_size\n+ # Case 1 -> We're too big!\n+ if current_max_size is not None and current_memory_used + module_size > current_max_size:\n+ # Split or not split?\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # -> no split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # -> split, we replace the module studied by its children + parameters\n+ modules_children = list(module.named_parameters(recurse=False)) + modules_children\n+ modules_to_treat = [(f\"{name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+\n+ # Case 2, it fits! 
We're not entirely out of the wood though, because we may have some tied parameters.\n+ elif tied_param is not None:\n+ # Determine the sized occupied by this module + the module containing the tied parameter\n+ tied_module_size = module_size\n+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]\n+ tied_module_name, tied_module = modules_to_treat[tied_module_index]\n+ tied_module_size += module_sizes[tied_module_name] - module_sizes[tied_param]\n+ if current_max_size is not None and current_memory_used + tied_module_size > current_max_size:\n+ # Split or not split?\n+ tied_module_children = list(tied_module.named_children())\n+ if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:\n+ # If the tied module is not split, we go to the next device\n+ current_device += 1\n+ modules_to_treat = [(name, module)] + modules_to_treat\n+ current_memory_used = 0\n+ else:\n+ # Otherwise, we replace the tied module by its children.\n+ tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children\n+ tied_module_children = [(f\"{tied_module_name}.{n}\", v) for n, v in tied_module_children]\n+ modules_to_treat = (\n+ [(name, module)]\n+ + modules_to_treat[:tied_module_index]\n+ + tied_module_children\n+ + modules_to_treat[tied_module_index + 1 :]\n+ )\n+ # Update the max layer size.\n+ max_layer_size, max_layer_names = get_max_layer_size(\n+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],\n+ module_sizes,\n+ no_split_module_classes,\n+ )\n+ else:\n+ # We really really fit!\n+ current_memory_used += tied_module_size\n+ device_map[name] = devices[current_device]\n+ modules_to_treat.pop(tied_module_index)\n+ device_map[tied_module_name] = devices[current_device]\n+ else:\n+ current_memory_used += module_size\n+ device_map[name] = devices[current_device]\n+\n+ return clean_device_map(device_map)\n+\n+\n+def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):\n+ \"\"\"\n+ Checks a device map covers everything in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to check the device map against.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.\n+ \"\"\"\n+ all_model_tensors = [name for name, _ in model.state_dict().items()]\n+ for module_name in device_map.keys():\n+ all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)]\n+ if len(all_model_tensors) > 0:\n+ non_covered_params = \", \".join(all_model_tensors)\n+ raise ValueError(\n+ f\"The device_map provided does not give any device for the following parameters: {non_covered_params}\"\n+ )\n+\n+\n+def load_checkpoint_in_model(\n+ model: nn.Module,\n+ checkpoint: Union[str, os.PathLike],\n+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,\n+ offload_folder: Optional[Union[str, os.PathLike]] = None,\n+):\n+ \"\"\"\n+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are\n+ loaded.\n+\n+ <Tip warning={true}>\n+\n+ Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To\n+ group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.\n+ checkpoint (`str` or `os.PathLike`):\n+ The folder checkpoint to load. 
It can be:\n+ - a path to a file containing a whole model state dict\n+ - a path to a `.json` file containing the index to a sharded checkpoint\n+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.\n+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):\n+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer\n+ name, once a given module name is inside, every submodule of it will be sent to the same device.\n+ offload_folder (`str` or `os.PathLike`, *optional*):\n+ If the `device_map` contains any value `\"disk\"`, the folder where we will offload weights.\n+ \"\"\"\n+ if offload_folder is None and device_map is not None and \"disk\" in device_map.values():\n+ raise ValueError(\n+ \"At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`.\"\n+ )\n+\n+ checkpoint_files = None\n+ index_filename = None\n+ if os.path.isfile(checkpoint):\n+ if str(checkpoint).endswith(\".json\"):\n+ index_filename = checkpoint\n+ else:\n+ checkpoint_files = [checkpoint]\n+ elif os.path.isdir(checkpoint):\n+ potential_index = [f for f in os.listdir(checkpoint) if f.endswith(\".index.json\")]\n+ if len(potential_index) == 0:\n+ raise ValueError(f\"{checkpoint} is not a folder containing a `.index.json` file.\")\n+ elif len(potential_index) == 1:\n+ index_filename = os.path.join(checkpoint, potential_index[0])\n+ else:\n+ raise ValueError(f\"{checkpoint} containing mote than one `.index.json` file, delete the irrelevant ones.\")\n+ else:\n+ raise ValueError(\n+ \"`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded \"\n+ f\"checkpoint, or a folder containing a sharded checkpoint, but got {checkpoint}.\"\n+ )\n+\n+ if index_filename is not None:\n+ checkpoint_folder = os.path.split(index_filename)[0]\n+ with open(index_filename, \"r\") as f:\n+ index = json.loads(f.read())\n+\n+ if \"weight_map\" in index:\n+ index = index[\"weight_map\"]\n+ checkpoint_files = sorted(list(set(index.values())))\n+ checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]\n+\n+ # Logic for missing/unexepected keys goes here.\n+\n+ offload_index = {}\n+ for checkpoint_file in checkpoint_files:\n+ checkpoint = torch.load(checkpoint_file)", "from_author": true }, { "body": "This shouldn't rely on the order (we don't care if the \"main\" GPU is 0, 1, etc.) we just need to make sure we don't end up with `\"cpu\"` twice :-)", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]\n+\n+ # Recurse over the children\n+ children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]\n+ idx = len(module_name.split(\".\")) + 1 if len(module_name) > 0 else 1\n+ children_modules = set(\".\".join(k.split(\".\")[:idx]) for k in children_modules)\n+ for child in children_modules:\n+ clean_device_map(device_map, module_name=child)\n+\n+ return device_map\n+\n+\n+def infer_auto_device_map(\n+ model: nn.Module,\n+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,\n+ no_split_module_classes: Optional[List[str]] = None,\n+):\n+ \"\"\"\n+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,\n+ such that:\n+ - we don't exceed the memory available of any of the GPU.\n+ - if 
offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that\n+ has the largest size.\n+ - if offload to the CPU is needed,we don't exceed the RAM available on the CPU.\n+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk\n+ that has the largest size.\n+\n+ <Tip>\n+\n+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the\n+ meta device (as it would if initialized within the `init_empty_weights` context manager).\n+\n+ </Tip>\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to analyze.\n+ max_memory (`Dict`, *optional*):\n+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.\n+ no_split_module_classes (`List[str]`, *optional*):\n+ A list of layer class names that should never be split across device (for instance any layer that has a\n+ residual connection).\n+ \"\"\"\n+ # Get default / clean up max_memory\n+ max_memory = get_max_memory(max_memory)\n+ if no_split_module_classes is None:\n+ no_split_module_classes = []\n+ elif not isinstance(no_split_module_classes, (list, tuple)):\n+ no_split_module_classes = [no_split_module_classes]\n+\n+ devices = list(max_memory.keys()) + [\"disk\"]\n+ # Devices that need to keep space for a potential offloaded layer.\n+ main_devices = [devices[0], \"cpu\"]", "from_author": true }, { "body": "Fixed", "diff_hunk": "@@ -0,0 +1,568 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import gc\n+import json\n+import os\n+import re\n+from collections import defaultdict\n+from typing import Dict, List, Optional, Tuple, Union\n+\n+import numpy as np\n+import torch\n+import torch.nn as nn\n+\n+\n+WEIGHTS_INDEX_NAME = \"pytorch_model.bin.index.json\"\n+\n+\n+def convert_file_size_to_int(size: Union[int, str]):\n+ \"\"\"\n+ Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n+\n+ Args:\n+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n+\n+ Example:\n+\n+ ```py\n+ >>> convert_file_size_to_int(\"1MiB\")\n+ 1048576\n+ ```\n+ \"\"\"\n+ if isinstance(size, int):\n+ return size\n+ if size.upper().endswith(\"GIB\"):\n+ return int(size[:-3]) * (2**30)\n+ if size.upper().endswith(\"MIB\"):\n+ return int(size[:-3]) * (2**20)\n+ if size.upper().endswith(\"KIB\"):\n+ return int(size[:-3]) * (2**10)\n+ if size.upper().endswith(\"GB\"):\n+ int_size = int(size[:-2]) * (10**9)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"MB\"):\n+ int_size = int(size[:-2]) * (10**6)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ if size.upper().endswith(\"KB\"):\n+ int_size = int(size[:-2]) * (10**3)\n+ return int_size // 8 if size.endswith(\"b\") else int_size\n+ raise ValueError(\"`size` is not in a valid format. 
Use an integer followed by the unit, e.g., '5GB'.\")\n+\n+\n+def dtype_byte_size(dtype: torch.dtype):\n+ \"\"\"\n+ Returns the size (in bytes) occupied by one parameter of type `dtype`.\n+\n+ Example:\n+\n+ ```py\n+ >>> dtype_byte_size(torch.float32)\n+ 4\n+ ```\n+ \"\"\"\n+ if dtype == torch.bool:\n+ return 1 / 8\n+ bit_search = re.search(\"[^\\d](\\d+)$\", str(dtype))\n+ if bit_search is None:\n+ raise ValueError(f\"`dtype` is not a valid dtype: {dtype}.\")\n+ bit_size = int(bit_search.groups()[0])\n+ return bit_size // 8\n+\n+\n+def set_module_tensor_to_device(\n+ module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None\n+):\n+ \"\"\"\n+ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).\n+\n+ Args:\n+ module (`torch.nn.Module`): The module in which the tensor we want to move lives.\n+ param_name (`str`): The full name of the parameter/buffer.\n+ device (`int`, `str` or `torch.device`): The device on which to set the tensor.\n+ value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any\n+ other device).\n+ \"\"\"\n+ # Recurse if needed\n+ if \".\" in tensor_name:\n+ splits = tensor_name.split(\".\")\n+ for split in splits[:-1]:\n+ new_module = getattr(module, split)\n+ if new_module is None:\n+ raise ValueError(f\"{module} has no attribute {split}.\")\n+ module = new_module\n+ tensor_name = splits[-1]\n+\n+ if tensor_name not in module._parameters and tensor_name not in module._buffers:\n+ raise ValueError(f\"{module} does not have a parameter or a buffer named {tensor_name}.\")\n+ is_buffer = tensor_name in module._buffers\n+ old_value = getattr(module, tensor_name)\n+\n+ if old_value.device == torch.device(\"meta\") and device != torch.device(\"meta\") and value is None:\n+ raise ValueError(f\"{tensor_name} is on the meta device, we need a `value` to put in on {device}.\")\n+\n+ with torch.no_grad():\n+ if value is None:\n+ new_value = old_value.to(device)\n+ elif isinstance(value, torch.Tensor):\n+ new_value = value.to(device)\n+ else:\n+ new_value = torch.tensor(value, device=device)\n+ if is_buffer:\n+ module._buffers[tensor_name] = new_value\n+ else:\n+ new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n+ module._parameters[tensor_name] = new_value\n+\n+\n+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):\n+ \"\"\"\n+ A helper function that gathers all the tensors (parameters + buffers) of a given module. 
If `include_buffers=True`\n+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.\n+\n+ Args:\n+ module (`torch.nn.Module`): The module we want the tensors or.\n+ include_buffer (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.\n+ recurse (`bool`, *optional`, defaults to `False`):\n+ Whether or not to go look in every submodule or just return the direct parameters and buffers.\n+ \"\"\"\n+ for named_parameter in module.named_parameters(recurse=recurse):\n+ yield named_parameter\n+\n+ if include_buffers:\n+ for named_buffer in module.named_buffers(recurse=recurse):\n+ yield named_buffer\n+\n+\n+def find_tied_parameters(model: nn.Module, **kwargs):\n+ \"\"\"\n+ Find the tied parameters in a given model.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to inspect.\n+\n+ <Tip warning={true}>\n+\n+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore\n+ them.\n+\n+ </Tip>\n+\n+ Example:\n+\n+\n+ ```py\n+ >>> from collections import OrderedDict\n+ >>> import torch.nn as nn\n+\n+ >>> model = nn.Sequential(OrderedDict([(\"linear1\", nn.Linear(4, 4)), (\"linear2\", nn.Linear(4, 4))]))\n+ >>> model.linear2.weight = test_model.linear1.weight\n+ >>> find_tied_parameters(test_model)\n+ {'linear1.weight': 'linear2.weight'}\n+ ```\n+\n+ Returns:\n+ Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.\n+ \"\"\"\n+ # Initialize result and named_parameters before recursing.\n+ named_parameters = kwargs.get(\"named_parameters\", None)\n+ prefix = kwargs.get(\"prefix\", \"\")\n+ result = kwargs.get(\"result\", {})\n+\n+ if named_parameters is None:\n+ named_parameters = {n: p for n, p in model.named_parameters()}\n+ else:\n+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`\n+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial\n+ # `named_parameters`.\n+ for name, parameter in model.named_parameters():\n+ full_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ if full_name not in named_parameters:\n+ # When we find one, it has to be one of the existing parameters.\n+ for new_name, new_param in named_parameters.items():\n+ if new_param is parameter:\n+ result[new_name] = full_name\n+\n+ # Once we have treated direct parameters, we move to the child modules.\n+ for name, child in model.named_children():\n+ child_name = name if prefix == \"\" else f\"{prefix}.{name}\"\n+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)\n+\n+ return result\n+\n+\n+def compute_module_sizes(model: nn.Module):\n+ \"\"\"\n+ Compute the size of each submodule of a given model.\n+ \"\"\"\n+ module_sizes = defaultdict(int)\n+ for name, tensor in named_module_tensors(model, recurse=True):\n+ size = tensor.numel() * dtype_byte_size(tensor.dtype)\n+ name_parts = name.split(\".\")\n+ for idx in range(len(name_parts) + 1):\n+ module_sizes[\".\".join(name_parts[:idx])] += size\n+\n+ return module_sizes\n+\n+\n+def get_max_layer_size(\n+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]\n+):\n+ \"\"\"\n+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. 
The\n+ definition of a layer being:\n+ - a module with no direct children (just parameters and buffers)\n+ - a module whose class name is in the list `no_split_module_classes`\n+\n+ Args:\n+ modules (`List[Tuple[str, torch.nn.Module]]`):\n+ The list of named modules where we want to determine the maximum layer size.\n+ module_sizes (`Dict[str, int]`):\n+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).\n+ no_split_module_classes (`List[str]`):\n+ A list of class names for layers we don't want to be split.\n+\n+ Returns:\n+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.\n+ \"\"\"\n+ max_size = 0\n+ layer_names = []\n+ modules_to_treat = modules.copy()\n+ while len(modules_to_treat) > 0:\n+ module_name, module = modules_to_treat.pop(0)\n+ modules_children = list(module.named_children())\n+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:\n+ # No splitting this one so we compare to the max_size\n+ size = module_sizes[module_name]\n+ if size > max_size:\n+ max_size = size\n+ layer_names = [module_name]\n+ elif size == max_size:\n+ layer_names.append(module_name)\n+ else:\n+ modules_to_treat = [(f\"{module_name}.{n}\", v) for n, v in modules_children] + modules_to_treat\n+ return max_size, layer_names\n+\n+\n+def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):\n+ \"\"\"\n+ Get the maximum memory available if nothing is passed, converts string to int otherwise.\n+ \"\"\"\n+ import psutil\n+\n+ if max_memory is None:\n+ if not torch.cuda.is_available():\n+ max_memory = {}\n+ else:\n+ # Make sure CUDA is initialized on each GPU to have the right memory info.\n+ for i in range(torch.cuda.device_count()):\n+ _ = torch.tensor([0], device=i)\n+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}\n+ max_memory[\"cpu\"] = psutil.virtual_memory().available\n+ return max_memory\n+\n+ for key in max_memory:\n+ if isinstance(max_memory[key], str):\n+ max_memory[key] = convert_file_size_to_int(max_memory[key])\n+ return max_memory\n+\n+\n+def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = \"\"):\n+ \"\"\"\n+ Cleans a device_map by grouping all submodules that go on the same device together.\n+ \"\"\"\n+ # Get the value of the current module and if there is only one split across several keys, regroup it.\n+ values = [v for k, v in device_map.items() if k.startswith(module_name)]\n+ if len(set(values)) == 1 and len(values) > 1:\n+ for k in [k for k in device_map if k.startswith(module_name)]:\n+ del device_map[k]\n+ device_map[module_name] = values[0]", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Super cool PR! Two questions:\r\n\r\n1. How fast is generate with parts of the weights offloaded compared to not offloaded? Guess it strongly depends on the hardware, but e.g. on a V100, for let's say `gpt2`?\r\n\r\n2. More generally, do you think we could eventually do the following code:\r\n\r\n```py\r\ncheckpoint = \"bigscience/T0pp\"\r\nconfig = AutoConfig.from_pretrained(checkpoint)\r\n\r\n# Initializes an empty shell with the model. 
This is instant and does not take any RAM.\r\nwith init_empty_weights():\r\n model = AutoModelForSeq2SeqLM.from_config(config)\r\n# Initialize the model under the previous context manager breaks the tied weights.\r\nmodel.tie_weights()\r\n# Determine a device map that maximizes the available GPUs, you can also write your own.\r\n# If using a different model, adjust `T5Block` to the proper class (for instance `\"GPTJBlock\"`)\r\ndevice_map = infer_auto_device_map(model, no_split_module_classes=[\"T5Block\"])\r\n# Load the sharded checkpoint inside the model. This will load each part of the model on the device specified by `device_map`\r\nload_sharded_checkpoint_in_model(model, path_to_local_t0pp, device_map=device_map)\r\n# This will make that model that leaves on several different devices just work.\r\nmodel = dispatch_model(model, device_map)\r\n```\r\n\r\nfully in Tranformers' `from_pretrained(...)` method with an optional flag? E.g. from a user's perspective wouldn't it be very cool to be able to just do something liike:\r\n\r\n```py\r\nfrom transformers import AutoModelForSeq2SeqLM\r\n\r\nmodel = AutoModelForSeq2SeqLM.from_pretrained(checkpoint, device_map=\"auto\")\r\n```\r\n\r\nwould this be desirable in a future PR?", "from_author": false }, { "body": "I'll post more benchmarks toward the end but basically:\r\n- having the model on 2 GPUs instead of one for generate does not change speed. (I do have an NVlink though, so maybe it could slower, though there is only one change of GPU per forward pass, so it's not much)\r\n- offloading to CPU his 10-15x slower than GPU (for the part offloaded, which is why it's import to maximize GPU use before using CPU offload)\r\n- offloading to disk is 20-30x slower than GPU.", "from_author": true }, { "body": "> I'll post more benchmarks toward the end but basically:\r\n> \r\n> * having the model on 2 GPUs instead of one for generate does not change speed. (I do have an NVlink though, so maybe it could slower, though there is only one change of GPU per forward pass, so it's not much)\r\n> * offloading to CPU his 10-15x slower than GPU (for the part offloaded, which is why it's import to maximize GPU use before using CPU offload)\r\n> * offloading to disk is 20-30x slower than GPU.\r\n\r\nIt would be interesting to compare to [deepseed inference](https://www.deepspeed.ai/tutorials/inference-tutorial/). 
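Given those numbers, the practical lever is how much of the model is allowed to stay on each device before anything spills to CPU or disk. A hedged sketch, reusing the meta-device `model` from the snippet above; the memory figures, the import path and the example output are placeholders.

```py
from accelerate import infer_auto_device_map  # assumed import path

device_map = infer_auto_device_map(
    model,                                    # the empty (meta-device) model built above
    max_memory={0: "10GiB", "cpu": "30GiB"},  # strings are parsed by convert_file_size_to_int
    no_split_module_classes=["T5Block"],      # keep residual blocks on a single device
)
print(device_map)  # e.g. {'shared': 0, 'encoder.block.0': 0, ..., 'decoder.block.23': 'cpu'}
```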
\r\n", "from_author": false }, { "body": "I doubt it will be faster @philschmid but it won't be a black box that replaces your model.", "from_author": true }, { "body": "Could support be added for TPU inference as well?", "from_author": false }, { "body": "> Could support be added for TPU inference as well?\r\n\r\nNone of the things introduced here are efficient on TPUs: the model parallelism is vertical (a few layers are executed on GPU 0, then the next ones on GPU 1) which is not efficient on TPU which are more designed for horizontal parallelism, and the CPU offload would be excruciatingly slow on TPU, since the communication CPU/TPU is a huge bottleneck there.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/345", "pr_id": 929082943 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 559fda41c..8ff69e048 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -132,12 +132,20 @@ def get_cluster_input():\n else:\n main_training_function = \"main\"\n \n- num_processes = _ask_field(\n- \"How many devices should be used for (potentially) distributed training? [1]:\",\n- lambda x: int(x),\n- default=1,\n- error_message=\"Please enter an integer.\",\n- )\n+ if distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.TPU]:\n+ machine_type = str(distributed_type).split(\".\")[0].replace(\"MULTI_\", \"\")\n+ if machine_type == \"TPU\":\n+ machine_type += \" cores\"\n+ else:\n+ machine_type += \"'s\"\n+ num_processes = _ask_field(\n+ f\"How many {machine_type} should be used for distributed training? [1]:\",\n+ lambda x: int(x),\n+ default=1,\n+ error_message=\"Please enter an integer.\",\n+ )\n+ else:\n+ num_processes = 1\n \n if distributed_type != DistributedType.TPU:\n mixed_precision = _ask_field(\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/344", "pr_id": 928887894 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex cfab0f7f8..559fda41c 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -133,7 +133,7 @@ def get_cluster_input():\n main_training_function = \"main\"\n \n num_processes = _ask_field(\n- \"How many processes in total will you use? [1]: \",\n+ \"How many devices should be used for (potentially) distributed training? [1]:\",\n lambda x: int(x),\n default=1,\n error_message=\"Please enter an integer.\",\n", "code_comments": [ { "body": "```suggestion\r\n \"How many devices should be used for (potentially) distributed training? [1]:\",\r\n```", "diff_hunk": "@@ -133,7 +133,7 @@ def get_cluster_input():\n main_training_function = \"main\"\n \n num_processes = _ask_field(\n- \"How many processes in total will you use? [1]: \",\n+ \"How many devices should be used for (potentially) distributed training? [1:]\",", "from_author": false } ], "context": [ { "body": "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_343). 
All of your documentation changes will be reflected on that endpoint.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/343", "pr_id": 928803115 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nindex e17e9ff67..e39d3ede8 100644\n--- a/examples/by_feature/fsdp_with_peak_mem_tracking.py\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -251,12 +251,13 @@ def collate_fn(examples):\n )\n )\n # Logging the peak memory usage of the GPU to the tracker\n- accelerator.log(\n- {\n- \"train_total_peak_memory\": tracemalloc.peaked + b2mb(tracemalloc.begin),\n- },\n- step=epoch,\n- )\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"train_total_peak_memory\": tracemalloc.peaked + b2mb(tracemalloc.begin),\n+ },\n+ step=epoch,\n+ )\n \n # New Code #\n # context manager to track the peak memory usage during the evaluation\n@@ -302,12 +303,13 @@ def collate_fn(examples):\n \"Total Peak Memory consumed during the eval (max): {}\".format(tracemalloc.peaked + b2mb(tracemalloc.begin))\n )\n # Logging the peak memory usage of the GPU to the tracker\n- accelerator.log(\n- {\n- \"eval_total_peak_memory\": tracemalloc.peaked + b2mb(tracemalloc.begin),\n- },\n- step=epoch,\n- )\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"eval_total_peak_memory\": tracemalloc.peaked + b2mb(tracemalloc.begin),\n+ },\n+ step=epoch,\n+ )\n \n if args.with_tracking:\n accelerator.end_training()\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex fa4496f91..ba34587e2 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -465,17 +465,20 @@ def prepare_model(self, model):\n elif self.distributed_type == DistributedType.FSDP:\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n \n- fsdp_plugin = self.state.fsdp_plugin\n- model = FSDP(\n- model,\n- sharding_strategy=fsdp_plugin.sharding_strategy,\n- cpu_offload=fsdp_plugin.cpu_offload,\n- auto_wrap_policy=fsdp_plugin.auto_wrap_policy,\n- backward_prefetch=fsdp_plugin.backward_prefetch,\n- ignored_modules=fsdp_plugin.ignored_modules,\n- )\n- if not fsdp_plugin.cpu_offload.offload_params:\n- model.to(self.device)\n+ # Check if the model is already a FSDP model due to `Manual Wrapping` and if so,\n+ # don't wrap it again\n+ if type(model) != FSDP:\n+ fsdp_plugin = self.state.fsdp_plugin\n+ model = FSDP(\n+ model,\n+ sharding_strategy=fsdp_plugin.sharding_strategy,\n+ cpu_offload=fsdp_plugin.cpu_offload,\n+ auto_wrap_policy=fsdp_plugin.auto_wrap_policy,\n+ backward_prefetch=fsdp_plugin.backward_prefetch,\n+ ignored_modules=fsdp_plugin.ignored_modules,\n+ )\n+ if not fsdp_plugin.cpu_offload.offload_params:\n+ model.to(self.device)\n elif self.distributed_type == DistributedType.MULTI_CPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/342", "pr_id": 928796523 }, { "diff": "diff --git a/docs/source/memory.mdx b/docs/source/memory.mdx\nindex 31a2bc66f..e578e43d7 100644\n--- a/docs/source/memory.mdx\n+++ b/docs/source/memory.mdx\n@@ -48,4 +48,4 @@ def training_function(args):\n + inner_training_loop()\n ```\n 
\n-[[autodoc]] memory_utils.find_executable_batch_size\n\\ No newline at end of file\n+[[autodoc]] utils.find_executable_batch_size\n\\ No newline at end of file\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nindex 67263de52..6f3f3e0cb 100644\n--- a/examples/by_feature/memory.py\n+++ b/examples/by_feature/memory.py\n@@ -19,7 +19,7 @@\n from accelerate import Accelerator, DistributedType\n \n # New Code #\n-from accelerate.memory_utils import find_executable_batch_size\n+from accelerate.utils import find_executable_batch_size\n from datasets import load_dataset, load_metric\n from transformers import (\n AdamW,\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex 1589423be..b74c683b4 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -5,7 +5,13 @@\n __version__ = \"0.8.0.dev0\"\n \n from .accelerator import Accelerator\n-from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs\n from .launchers import debug_launcher, notebook_launcher\n-from .state import DistributedType\n-from .utils import DeepSpeedPlugin, synchronize_rng_states\n+from .utils import (\n+ DeepSpeedPlugin,\n+ DistributedDataParallelKwargs,\n+ DistributedType,\n+ GradScalerKwargs,\n+ InitProcessGroupKwargs,\n+ find_executable_batch_size,\n+ synchronize_rng_states,\n+)\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 7f1480436..fa4496f91 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -25,15 +25,19 @@\n \n from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state\n from .data_loader import prepare_data_loader\n-from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler\n from .logging import get_logger\n from .optimizer import AcceleratedOptimizer\n from .scheduler import AcceleratedScheduler\n-from .state import AcceleratorState, DistributedType, is_deepspeed_available\n+from .state import AcceleratorState\n from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers\n from .utils import (\n DeepSpeedPlugin,\n+ DistributedDataParallelKwargs,\n+ DistributedType,\n FullyShardedDataParallelPlugin,\n+ GradScalerKwargs,\n+ InitProcessGroupKwargs,\n+ KwargsHandler,\n LoggerType,\n PrecisionType,\n RNGType,\n@@ -41,6 +45,7 @@\n extract_model_from_parallel,\n gather,\n get_pretty_name,\n+ is_deepspeed_available,\n pad_across_processes,\n reduce,\n save,\n@@ -51,7 +56,7 @@\n if is_deepspeed_available():\n import deepspeed\n \n- from .deepspeed_utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper\n+ from .utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper\n \n logger = get_logger(__name__)\n \ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex a94067144..b0e2c04a2 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -21,8 +21,16 @@\n import torch\n from torch.cuda.amp import GradScaler\n \n-from .state import is_tpu_available\n-from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME, get_pretty_name, save\n+from .utils import (\n+ MODEL_NAME,\n+ OPTIMIZER_NAME,\n+ RNG_STATE_NAME,\n+ SCALER_NAME,\n+ SCHEDULER_NAME,\n+ get_pretty_name,\n+ is_tpu_available,\n+ save,\n+)\n \n \n if is_tpu_available():\ndiff --git a/src/accelerate/commands/config/__init__.py 
b/src/accelerate/commands/config/__init__.py\nindex 5bafb3be2..11719835a 100644\n--- a/src/accelerate/commands/config/__init__.py\n+++ b/src/accelerate/commands/config/__init__.py\n@@ -17,7 +17,7 @@\n import argparse\n import os\n \n-from accelerate.state import ComputeEnvironment\n+from accelerate.utils import ComputeEnvironment\n \n from .cluster import get_cluster_input\n from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex f9d35ea48..cfab0f7f8 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -14,9 +14,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from accelerate.state import ComputeEnvironment, DistributedType\n-\n-from ...utils import is_deepspeed_available\n+from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available\n from .config_args import ClusterConfig\n from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n \ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 58db9b24e..fcb527aeb 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -21,7 +21,8 @@\n from typing import Optional, Union\n \n import yaml\n-from accelerate.state import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+\n+from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType\n \n \n hf_cache_home = os.path.expanduser(\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nindex 2bc0e7ec9..fe72be267 100644\n--- a/src/accelerate/commands/config/config_utils.py\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -14,7 +14,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from accelerate.state import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+from ...utils.dataclasses import ComputeEnvironment, DistributedType, SageMakerDistributedType\n \n \n def _ask_field(input_text, convert_value=None, default=None, error_message=None):\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex 3b0fc78dd..f54659548 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -16,9 +16,8 @@\n import json\n import os\n \n-from accelerate.state import ComputeEnvironment, SageMakerDistributedType\n-from accelerate.utils import is_boto3_available\n-\n+from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType\n+from ...utils.imports import is_boto3_available\n from .config_args import SageMakerConfig\n from .config_utils import _ask_field, _convert_sagemaker_distributed_mode\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 9fa10da7b..e3ce3fd62 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -26,8 +26,13 @@\n \n from accelerate.commands.config import default_config_file, load_config_from_file\n from accelerate.commands.config.config_args import SageMakerConfig\n-from accelerate.state import ComputeEnvironment, DistributedType\n-from accelerate.utils import 
PrecisionType, PrepareForLaunch, is_sagemaker_available\n+from accelerate.utils import (\n+ ComputeEnvironment,\n+ DistributedType,\n+ PrecisionType,\n+ PrepareForLaunch,\n+ is_sagemaker_available,\n+)\n \n \n def launch_command_parser(subparsers=None):\ndiff --git a/src/accelerate/kwargs_handlers.py b/src/accelerate/kwargs_handlers.py\ndeleted file mode 100644\nindex cf5985644..000000000\n--- a/src/accelerate/kwargs_handlers.py\n+++ /dev/null\n@@ -1,90 +0,0 @@\n-# Copyright 2021 The HuggingFace Team. All rights reserved.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-import copy\n-from dataclasses import dataclass\n-from datetime import timedelta\n-from typing import Optional\n-\n-\n-class KwargsHandler:\n- \"\"\"\n- Internal mixin that implements a `to_kwargs()` method for a dataclass.\n- \"\"\"\n-\n- def to_dict(self):\n- return copy.deepcopy(self.__dict__)\n-\n- def to_kwargs(self):\n- \"\"\"\n- Returns a dictionary containing the attributes with values different from the default of this class.\n- \"\"\"\n- default_dict = self.__class__().to_dict()\n- this_dict = self.to_dict()\n- return {k: v for k, v in this_dict.items() if default_dict[k] != v}\n-\n-\n-@dataclass\n-class DistributedDataParallelKwargs(KwargsHandler):\n- \"\"\"\n- Use this object in your [`Accelerator`] to customize how your model is wrapped in a\n- `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this\n- [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more\n- information on each argument.\n-\n- <Tip warning={true}>\n-\n- `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.\n-\n- </Tip>\"\"\"\n-\n- dim: int = 0\n- broadcast_buffers: bool = True\n- bucket_cap_mb: int = 25\n- find_unused_parameters: bool = False\n- check_reduction: bool = False\n- gradient_as_bucket_view: bool = False\n-\n-\n-@dataclass\n-class GradScalerKwargs(KwargsHandler):\n- \"\"\"\n- Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the\n- `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this\n- [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.\n-\n- <Tip warning={true}>\n-\n- `GradScaler` is only available in PyTorch 1.5.0 and later versions.\n-\n- </Tip>\"\"\"\n-\n- init_scale: float = 65536.0\n- growth_factor: float = 2.0\n- backoff_factor: float = 0.5\n- growth_interval: int = 2000\n- enabled: bool = True\n-\n-\n-@dataclass\n-class InitProcessGroupKwargs(KwargsHandler):\n- \"\"\"\n- Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. 
Please refer\n- to the documentation of this\n- [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more\n- information on each argument.\n- \"\"\"\n-\n- init_method: Optional[str] = None\n- timeout: timedelta = timedelta(seconds=1800)\ndiff --git a/src/accelerate/memory_utils.py b/src/accelerate/memory_utils.py\nindex 422ead770..eba10bb78 100644\n--- a/src/accelerate/memory_utils.py\n+++ b/src/accelerate/memory_utils.py\n@@ -12,77 +12,18 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-\"\"\"\n-A collection of utilities for ensuring that training can always occur. Heavily influenced by the\n-[toma](https://github.com/BlackHC/toma) library.\n-\"\"\"\n+# flake8: noqa\n+# There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n+# module, but to preserve other warnings. So, don't check this module at all\n \n-import functools\n-import gc\n-import inspect\n \n-import torch\n+import warnings\n \n \n-def should_reduce_batch_size(exception: Exception) -> bool:\n- \"\"\"\n- Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory\n+warnings.warn(\n+ \"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: \"\n+ \"`from accelerate import find_executable_batch_size` to avoid this warning.\",\n+ FutureWarning,\n+)\n \n- Args:\n- exception (`Exception`):\n- An exception\n- \"\"\"\n- _statements = [\n- \"CUDA out of memory.\", # CUDA OOM\n- \"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.\", # CUDNN SNAFU\n- \"DefaultCPUAllocator: can't allocate memory\", # CPU OOM\n- ]\n- if isinstance(exception, RuntimeError) and len(exception.args) == 1:\n- return any(err in exception.args[0] for err in _statements)\n- return False\n-\n-\n-def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):\n- \"\"\"\n- A basic decorator that will try to execute `function`. 
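To make the `memory_utils` deprecation shim a few lines above concrete, here is an assumed usage sketch (not taken from the repository's tests): the old `accelerate.memory_utils` path keeps working but emits a `FutureWarning`, while the preferred path imports from the package root.

```py
import warnings

# Old location: still importable, but the module-level warning in the shim fires
# the first time `accelerate.memory_utils` is imported in a session.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from accelerate.memory_utils import find_executable_batch_size  # noqa: F401
print(any(issubclass(w.category, FutureWarning) for w in caught))  # True on first import

# New, preferred location after the reorganization:
from accelerate import find_executable_batch_size  # noqa: F811
```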
If it fails from exceptions related to out-of-memory or\n- CUDNN, the batch size is cut in half and passed to `function`\n-\n- `function` must take in a `batch_size` parameter as its first argument.\n-\n- Args:\n- function (`callable`, *optional*):\n- A function to wrap\n- starting_batch_size (`int`, *optional*):\n- The batch size to try and fit into memory\n- \"\"\"\n- if function is None:\n- return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)\n-\n- batch_size = starting_batch_size\n-\n- def decorator(*args, **kwargs):\n- nonlocal batch_size\n- gc.collect()\n- torch.cuda.empty_cache()\n- params = list(inspect.signature(function).parameters.keys())\n- # Guard against user error\n- if len(params) < (len(args) + 1):\n- arg_str = \", \".join([f\"{arg}={value}\" for arg, value in zip(params[1:], args[1:])])\n- raise TypeError(\n- f\"Batch size was passed into `{function.__name__}` as the first argument when called.\"\n- f\"Remove this as the decorator already does so: `{function.__name__}({arg_str})`\"\n- )\n- while True:\n- if batch_size == 0:\n- raise RuntimeError(\"No executable batch size found, reached zero.\")\n- try:\n- return function(batch_size, *args, **kwargs)\n- except Exception as e:\n- if should_reduce_batch_size(e):\n- gc.collect()\n- torch.cuda.empty_cache()\n- batch_size //= 2\n- else:\n- raise\n-\n- return decorator\n+from .utils.memory import find_executable_batch_size\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 1bf37b7d5..dcf792592 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -19,8 +19,8 @@\n \n from packaging import version\n \n-from .state import AcceleratorState, DistributedType, is_tpu_available\n-from .utils import honor_type\n+from .state import AcceleratorState\n+from .utils import DistributedType, honor_type, is_tpu_available\n \n \n if is_tpu_available():\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex df2e36e9a..fbda09754 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -12,29 +12,17 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import importlib\n import os\n from distutils.util import strtobool\n-from enum import Enum\n \n import torch\n \n+from .utils import DistributedType, is_ccl_available, is_deepspeed_available, is_tpu_available\n \n-try:\n- import torch_ccl # noqa: F401\n \n- _ccl_available = True\n-except ImportError:\n- _ccl_available = False\n-\n-\n-try:\n+if is_tpu_available():\n import torch_xla.core.xla_model as xm\n \n- _tpu_available = True\n-except ImportError:\n- _tpu_available = False\n-\n \n def get_int_from_env(env_keys, default):\n \"\"\"Returns the first positive env value found in the `env_keys` list or the default.\"\"\"\n@@ -45,22 +33,6 @@ def get_int_from_env(env_keys, default):\n return default\n \n \n-def is_ccl_available():\n- return _ccl_available\n-\n-\n-def is_apex_available():\n- return importlib.util.find_spec(\"apex\") is not None\n-\n-\n-def is_tpu_available():\n- return _tpu_available\n-\n-\n-def is_deepspeed_available():\n- return importlib.util.find_spec(\"deepspeed\") is not None\n-\n-\n def parse_flag_from_env(key, default=False):\n value = os.environ.get(key, str(default))\n return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int...\n@@ -71,60 +43,6 @@ def parse_choice_from_env(key, default=\"no\"):\n return value\n \n \n-class DistributedType(str, Enum):\n- 
\"\"\"\n- Represents a type of distributed environment.\n-\n- Values:\n-\n- - **NO** -- Not a distributed environment, just a single process.\n- - **MULTI_CPU** -- Distributed on multiple CPU nodes.\n- - **MULTI_GPU** -- Distributed on multiple GPUs.\n- - **DEEPSPEED** -- Using DeepSpeed.\n- - **TPU** -- Distributed on TPUs.\n- \"\"\"\n-\n- # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.\n- NO = \"NO\"\n- MULTI_CPU = \"MULTI_CPU\"\n- MULTI_GPU = \"MULTI_GPU\"\n- DEEPSPEED = \"DEEPSPEED\"\n- FSDP = \"FSDP\"\n- TPU = \"TPU\"\n-\n-\n-class SageMakerDistributedType(str, Enum):\n- \"\"\"\n- Represents a type of distributed environment.\n-\n- Values:\n-\n- - **NO** -- Not a distributed environment, just a single process.\n- - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.\n- - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.\n- \"\"\"\n-\n- # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.\n- NO = \"NO\"\n- DATA_PARALLEL = \"DATA_PARALLEL\"\n- MODEL_PARALLEL = \"MODEL_PARALLEL\"\n-\n-\n-class ComputeEnvironment(str, Enum):\n- \"\"\"\n- Represents a type of the compute environment.\n-\n- Values:\n-\n- - **LOCAL_MACHINE** -- private/custom cluster hardware.\n- - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.\n- \"\"\"\n-\n- # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.\n- LOCAL_MACHINE = \"LOCAL_MACHINE\"\n- AMAZON_SAGEMAKER = \"AMAZON_SAGEMAKER\"\n-\n-\n # Inspired by Alex Martelli's 'Borg'.\n class AcceleratorState:\n \"\"\"\ndiff --git a/src/accelerate/test_utils/test_script.py b/src/accelerate/test_utils/test_script.py\nindex f9a99e22f..48e7bcc49 100644\n--- a/src/accelerate/test_utils/test_script.py\n+++ b/src/accelerate/test_utils/test_script.py\n@@ -19,9 +19,9 @@\n \n from accelerate import Accelerator\n from accelerate.data_loader import prepare_data_loader\n-from accelerate.state import AcceleratorState, DistributedType\n+from accelerate.state import AcceleratorState\n from accelerate.test_utils import RegressionDataset, RegressionModel, are_the_same_tensors\n-from accelerate.utils import gather, set_seed, synchronize_rng_states\n+from accelerate.utils import DistributedType, gather, set_seed, synchronize_rng_states\n from packaging import version\n \n \ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 0b8e2250f..9379eb0b9 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -25,8 +25,8 @@\n \n import torch\n \n-from ..state import AcceleratorState, is_tpu_available\n-from ..utils import gather, is_comet_ml_available, is_tensorflow_available, is_wandb_available\n+from ..state import AcceleratorState\n+from ..utils import gather, is_comet_ml_available, is_tensorflow_available, is_tpu_available, is_wandb_available\n \n \n def parse_flag_from_env(key, default=False):\ndiff --git a/src/accelerate/utils/__init__.py b/src/accelerate/utils/__init__.py\nnew file mode 100644\nindex 000000000..a43c7d261\n--- /dev/null\n+++ b/src/accelerate/utils/__init__.py\n@@ -0,0 +1,61 @@\n+# flake8: noqa\n+# There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n+# module, but to preserve other warnings. 
So, don't check this module at all\n+\n+from .constants import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME\n+from .dataclasses import (\n+ ComputeEnvironment,\n+ DeepSpeedPlugin,\n+ DistributedDataParallelKwargs,\n+ DistributedType,\n+ FullyShardedDataParallelPlugin,\n+ GradScalerKwargs,\n+ InitProcessGroupKwargs,\n+ KwargsHandler,\n+ LoggerType,\n+ PrecisionType,\n+ RNGType,\n+ SageMakerDistributedType,\n+ TensorInformation,\n+)\n+from .imports import (\n+ is_apex_available,\n+ is_boto3_available,\n+ is_ccl_available,\n+ is_comet_ml_available,\n+ is_deepspeed_available,\n+ is_sagemaker_available,\n+ is_tensorboard_available,\n+ is_tensorflow_available,\n+ is_tpu_available,\n+ is_wandb_available,\n+)\n+from .operations import (\n+ broadcast,\n+ broadcast_object_list,\n+ concatenate,\n+ convert_outputs_to_fp32,\n+ convert_to_fp32,\n+ find_batch_size,\n+ gather,\n+ gather_object,\n+ get_data_structure,\n+ honor_type,\n+ initialize_tensors,\n+ is_tensor_information,\n+ is_torch_tensor,\n+ pad_across_processes,\n+ recursively_apply,\n+ reduce,\n+ send_to_device,\n+ slice_tensors,\n+)\n+\n+\n+if is_deepspeed_available():\n+ from .deepspeed import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper\n+\n+from .launch import PrepareForLaunch\n+from .memory import find_executable_batch_size\n+from .other import extract_model_from_parallel, get_pretty_name, patch_environment, save, wait_for_everyone\n+from .random import set_seed, synchronize_rng_state, synchronize_rng_states\ndiff --git a/src/accelerate/utils/constants.py b/src/accelerate/utils/constants.py\nnew file mode 100644\nindex 000000000..c2c36a486\n--- /dev/null\n+++ b/src/accelerate/utils/constants.py\n@@ -0,0 +1,19 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+SCALER_NAME = \"scaler.pt\"\n+MODEL_NAME = \"pytorch_model\"\n+RNG_STATE_NAME = \"random_states\"\n+OPTIMIZER_NAME = \"optimizer\"\n+SCHEDULER_NAME = \"scheduler\"\ndiff --git a/src/accelerate/utils/dataclasses.py b/src/accelerate/utils/dataclasses.py\nnew file mode 100644\nindex 000000000..4a19f8b51\n--- /dev/null\n+++ b/src/accelerate/utils/dataclasses.py\n@@ -0,0 +1,304 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+General namespace and dataclass related classes\n+\"\"\"\n+\n+import copy\n+import enum\n+import functools\n+import os\n+import typing\n+from dataclasses import dataclass, field\n+from datetime import timedelta\n+from typing import Callable, Iterable, Optional\n+\n+import torch\n+\n+\n+class KwargsHandler:\n+ \"\"\"\n+ Internal mixin that implements a `to_kwargs()` method for a dataclass.\n+ \"\"\"\n+\n+ def to_dict(self):\n+ return copy.deepcopy(self.__dict__)\n+\n+ def to_kwargs(self):\n+ \"\"\"\n+ Returns a dictionary containing the attributes with values different from the default of this class.\n+ \"\"\"\n+ default_dict = self.__class__().to_dict()\n+ this_dict = self.to_dict()\n+ return {k: v for k, v in this_dict.items() if default_dict[k] != v}\n+\n+\n+@dataclass\n+class DistributedDataParallelKwargs(KwargsHandler):\n+ \"\"\"\n+ Use this object in your [`Accelerator`] to customize how your model is wrapped in a\n+ `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this\n+ [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more\n+ information on each argument.\n+\n+ <Tip warning={true}>\n+\n+ `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.\n+\n+ </Tip>\"\"\"\n+\n+ dim: int = 0\n+ broadcast_buffers: bool = True\n+ bucket_cap_mb: int = 25\n+ find_unused_parameters: bool = False\n+ check_reduction: bool = False\n+ gradient_as_bucket_view: bool = False\n+\n+\n+@dataclass\n+class GradScalerKwargs(KwargsHandler):\n+ \"\"\"\n+ Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the\n+ `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this\n+ [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.\n+\n+ <Tip warning={true}>\n+\n+ `GradScaler` is only available in PyTorch 1.5.0 and later versions.\n+\n+ </Tip>\"\"\"\n+\n+ init_scale: float = 65536.0\n+ growth_factor: float = 2.0\n+ backoff_factor: float = 0.5\n+ growth_interval: int = 2000\n+ enabled: bool = True\n+\n+\n+@dataclass\n+class InitProcessGroupKwargs(KwargsHandler):\n+ \"\"\"\n+ Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. 
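A small sketch of the `KwargsHandler.to_kwargs()` mechanism defined just above: only fields that differ from the dataclass defaults are returned, and the `Accelerator(kwargs_handlers=...)` call shown here reflects the usual way these handlers are consumed (the `ddp_handler.to_kwargs()` call in `accelerator.py` earlier in this section then unpacks them into `DistributedDataParallel`).

```py
from accelerate import Accelerator, DistributedDataParallelKwargs

# Only the fields that differ from the dataclass defaults are returned.
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
print(ddp_kwargs.to_kwargs())  # {'find_unused_parameters': True}

# Typical use: hand the handler to the Accelerator, which forwards the kwargs
# when it wraps the model in DistributedDataParallel.
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
```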
Please refer\n+ to the documentation of this\n+ [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more\n+ information on each argument.\n+ \"\"\"\n+\n+ init_method: Optional[str] = None\n+ timeout: timedelta = timedelta(seconds=1800)\n+\n+\n+class DistributedType(str, enum.Enum):\n+ \"\"\"\n+ Represents a type of distributed environment.\n+\n+ Values:\n+\n+ - **NO** -- Not a distributed environment, just a single process.\n+ - **MULTI_CPU** -- Distributed on multiple CPU nodes.\n+ - **MULTI_GPU** -- Distributed on multiple GPUs.\n+ - **DEEPSPEED** -- Using DeepSpeed.\n+ - **TPU** -- Distributed on TPUs.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.\n+ NO = \"NO\"\n+ MULTI_CPU = \"MULTI_CPU\"\n+ MULTI_GPU = \"MULTI_GPU\"\n+ DEEPSPEED = \"DEEPSPEED\"\n+ FSDP = \"FSDP\"\n+ TPU = \"TPU\"\n+\n+\n+class SageMakerDistributedType(str, enum.Enum):\n+ \"\"\"\n+ Represents a type of distributed environment.\n+\n+ Values:\n+\n+ - **NO** -- Not a distributed environment, just a single process.\n+ - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.\n+ - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.\n+ NO = \"NO\"\n+ DATA_PARALLEL = \"DATA_PARALLEL\"\n+ MODEL_PARALLEL = \"MODEL_PARALLEL\"\n+\n+\n+class ComputeEnvironment(str, enum.Enum):\n+ \"\"\"\n+ Represents a type of the compute environment.\n+\n+ Values:\n+\n+ - **LOCAL_MACHINE** -- private/custom cluster hardware.\n+ - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.\n+ LOCAL_MACHINE = \"LOCAL_MACHINE\"\n+ AMAZON_SAGEMAKER = \"AMAZON_SAGEMAKER\"\n+\n+\n+class EnumWithContains(enum.EnumMeta):\n+ \"A metaclass that adds the ability to check if `self` contains an item with the `in` operator\"\n+\n+ def __contains__(cls, item):\n+ try:\n+ cls(item)\n+ except ValueError:\n+ return False\n+ return True\n+\n+\n+class BaseEnum(enum.Enum, metaclass=EnumWithContains):\n+ \"An enum class that can get the value of an item with `str(Enum.key)`\"\n+\n+ def __str__(self):\n+ return self.value\n+\n+ @classmethod\n+ def list(cls):\n+ \"Method to list all the possible items in `cls`\"\n+ return list(map(lambda item: str(item), cls))\n+\n+\n+class LoggerType(BaseEnum):\n+ ALL = \"all\"\n+ TENSORBOARD = \"tensorboard\"\n+ WANDB = \"wandb\"\n+ COMETML = \"comet_ml\"\n+\n+\n+class PrecisionType(BaseEnum):\n+ NO = \"no\"\n+ FP16 = \"fp16\"\n+ BF16 = \"bf16\"\n+\n+\n+class RNGType(BaseEnum):\n+ TORCH = \"torch\"\n+ CUDA = \"cuda\"\n+ XLA = \"xla\"\n+ GENERATOR = \"generator\"\n+\n+\n+# data classes\n+\n+\n+@dataclass\n+class TensorInformation:\n+ shape: torch.Size\n+ dtype: torch.dtype\n+\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(\n+ default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n+ )\n+ zero_stage: int = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n+ )\n+\n+ auto_opt_mapping: bool = 
field(\n+ default=True,\n+ metadata={\"help\": \"whether to map torch.adam to deepspeed optimizer version of adam based on config\"},\n+ )\n+\n+ offload_optimizer_device: bool = field(default=None, metadata={\"help\": \"Possible options are none|cpu|nvme\"})\n+\n+ def __post_init__(self):\n+\n+ if self.gradient_accumulation_steps is None:\n+ self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n+\n+ if self.zero_stage is None:\n+ self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+\n+ if self.offload_optimizer_device is None:\n+ self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+\n+ self.deepspeed_config = {\n+ \"train_batch_size\": None,\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"zero_optimization\": {\n+ \"stage\": self.zero_stage,\n+ \"offload_optimizer\": {\n+ \"device\": self.offload_optimizer_device,\n+ },\n+ },\n+ \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n+ \"zero_allow_untested_optimizer\": True,\n+ }\n+\n+\n+@dataclass\n+class FullyShardedDataParallelPlugin:\n+ \"\"\"\n+ This plugin is used to enable fully sharded data parallelism.\n+ \"\"\"\n+\n+ sharding_strategy: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are [1] FULL_SHARD, [2] SHARD_GRAD_OP\"},\n+ )\n+ backward_prefetch: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are [1] BACKWARD_PRE, [2] BACKWARD_POST\"},\n+ )\n+ auto_wrap_policy: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\"help\": \"A callable specifying a policy to recursively wrap layers with FSDP\"},\n+ )\n+ cpu_offload: Optional[Callable] = field(\n+ default=None,\n+ metadata={\"help\": \"Decides Whether to offload parameters and gradients to CPU.\"},\n+ )\n+ min_num_params: int = field(\n+ default=None, metadata={\"help\": \"FSDP's minimum number of parameters for Default Auto Wrapping.\"}\n+ )\n+ ignored_modules: Optional[Iterable[torch.nn.Module]] = field(\n+ default=None,\n+ metadata={\"help\": \"A list of modules to ignore for FSDP.\"},\n+ )\n+\n+ def __post_init__(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, ShardingStrategy\n+ from torch.distributed.fsdp.wrap import default_auto_wrap_policy\n+\n+ if self.sharding_strategy is None:\n+ self.sharding_strategy = ShardingStrategy(int(os.environ.get(\"FSDP_SHARDING_STRATEGY\", 1)))\n+\n+ if self.cpu_offload is None:\n+ if os.environ.get(\"FSDP_OFFLOAD_PARAMS\", \"false\") == \"true\":\n+ self.cpu_offload = CPUOffload(offload_params=True)\n+ else:\n+ self.cpu_offload = CPUOffload(offload_params=False)\n+\n+ if self.min_num_params is None:\n+ self.min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n+\n+ if self.auto_wrap_policy is None:\n+ if self.min_num_params > 0:\n+ self.auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=self.min_num_params)\ndiff --git a/src/accelerate/deepspeed_utils.py b/src/accelerate/utils/deepspeed.py\nsimilarity index 96%\nrename from src/accelerate/deepspeed_utils.py\nrename to src/accelerate/utils/deepspeed.py\nindex 5f261f2e3..891f6fdb8 100644\n--- a/src/accelerate/deepspeed_utils.py\n+++ b/src/accelerate/utils/deepspeed.py\n@@ -12,8 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from .optimizer import AcceleratedOptimizer\n-from .state import is_apex_available, 
is_deepspeed_available\n+from ..optimizer import AcceleratedOptimizer\n+from .imports import is_apex_available, is_deepspeed_available\n \n \n if is_deepspeed_available():\ndiff --git a/src/accelerate/utils/imports.py b/src/accelerate/utils/imports.py\nnew file mode 100644\nindex 000000000..3f10b83cf\n--- /dev/null\n+++ b/src/accelerate/utils/imports.py\n@@ -0,0 +1,72 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\n+import importlib\n+\n+\n+try:\n+ import torch_ccl # noqa: F401\n+\n+ _ccl_available = True\n+except ImportError:\n+ _ccl_available = False\n+\n+\n+try:\n+ import torch_xla.core.xla_model as xm # noqa: F401\n+\n+ _tpu_available = True\n+except ImportError:\n+ _tpu_available = False\n+\n+\n+def is_ccl_available():\n+ return _ccl_available\n+\n+\n+def is_apex_available():\n+ return importlib.util.find_spec(\"apex\") is not None\n+\n+\n+def is_tpu_available():\n+ return _tpu_available\n+\n+\n+def is_deepspeed_available():\n+ return importlib.util.find_spec(\"deepspeed\") is not None\n+\n+\n+def is_tensorflow_available():\n+ return importlib.util.find_spec(\"tensorflow\") is not None\n+\n+\n+def is_tensorboard_available():\n+ return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\n+\n+\n+def is_wandb_available():\n+ return importlib.util.find_spec(\"wandb\") is not None\n+\n+\n+def is_comet_ml_available():\n+ return importlib.util.find_spec(\"comet_ml\") is not None\n+\n+\n+def is_boto3_available():\n+ return importlib.util.find_spec(\"boto3\") is not None\n+\n+\n+def is_sagemaker_available():\n+ return importlib.util.find_spec(\"sagemaker\") is not None\ndiff --git a/src/accelerate/utils/launch.py b/src/accelerate/utils/launch.py\nnew file mode 100644\nindex 000000000..8ff624250\n--- /dev/null\n+++ b/src/accelerate/utils/launch.py\n@@ -0,0 +1,55 @@\n+# Copyright 2022 The HuggingFace Team. 
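A quick sketch of how the enum helpers defined in `utils/dataclasses.py` earlier in this diff behave: `BaseEnum` stringifies members to their value, and the `EnumWithContains` metaclass lets `in` accept raw strings. The expected outputs assume the member values shown above.

```py
from accelerate.utils import PrecisionType

print(str(PrecisionType.FP16))  # 'fp16' -- BaseEnum.__str__ returns the value
print(PrecisionType.list())     # ['no', 'fp16', 'bf16']
print("bf16" in PrecisionType)  # True  -- EnumWithContains tries PrecisionType("bf16")
print("int8" in PrecisionType)  # False -- the ValueError is swallowed and False returned
```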
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+\n+import torch\n+\n+from .dataclasses import DistributedType\n+\n+\n+class PrepareForLaunch:\n+ \"\"\"\n+ Prepare a function that will launched in a distributed setup.\n+\n+ Args:\n+ launcher (`Callable`):\n+ The function to launch.\n+ distributed_type ([`~state.DistributedType`]):\n+ The distributed type to prepare for.\n+ debug (`bool`, *optional*, defaults to `False`):\n+ Whether or not this is a debug launch.\n+ \"\"\"\n+\n+ def __init__(self, launcher, distributed_type=\"NO\", debug=False):\n+ self.launcher = launcher\n+ self.distributed_type = DistributedType(distributed_type)\n+ self.debug = debug\n+\n+ def __call__(self, index, *args):\n+ if self.debug:\n+ world_size = int(os.environ.get(\"WORLD_SIZE\"))\n+ rdv_file = os.environ.get(\"ACCELERATE_DEBUG_RDV_FILE\")\n+ torch.distributed.init_process_group(\n+ \"gloo\",\n+ rank=index,\n+ store=torch.distributed.FileStore(rdv_file, world_size),\n+ world_size=world_size,\n+ )\n+ elif self.distributed_type == DistributedType.MULTI_GPU or self.distributed_type == DistributedType.MULTI_CPU:\n+ # Prepare the environment for torch.distributed\n+ os.environ[\"LOCAL_RANK\"] = str(index)\n+ os.environ[\"RANK\"] = str(index)\n+\n+ self.launcher(*args)\ndiff --git a/src/accelerate/utils/memory.py b/src/accelerate/utils/memory.py\nnew file mode 100644\nindex 000000000..422ead770\n--- /dev/null\n+++ b/src/accelerate/utils/memory.py\n@@ -0,0 +1,88 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A collection of utilities for ensuring that training can always occur. 
Heavily influenced by the\n+[toma](https://github.com/BlackHC/toma) library.\n+\"\"\"\n+\n+import functools\n+import gc\n+import inspect\n+\n+import torch\n+\n+\n+def should_reduce_batch_size(exception: Exception) -> bool:\n+ \"\"\"\n+ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory\n+\n+ Args:\n+ exception (`Exception`):\n+ An exception\n+ \"\"\"\n+ _statements = [\n+ \"CUDA out of memory.\", # CUDA OOM\n+ \"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.\", # CUDNN SNAFU\n+ \"DefaultCPUAllocator: can't allocate memory\", # CPU OOM\n+ ]\n+ if isinstance(exception, RuntimeError) and len(exception.args) == 1:\n+ return any(err in exception.args[0] for err in _statements)\n+ return False\n+\n+\n+def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):\n+ \"\"\"\n+ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or\n+ CUDNN, the batch size is cut in half and passed to `function`\n+\n+ `function` must take in a `batch_size` parameter as its first argument.\n+\n+ Args:\n+ function (`callable`, *optional*):\n+ A function to wrap\n+ starting_batch_size (`int`, *optional*):\n+ The batch size to try and fit into memory\n+ \"\"\"\n+ if function is None:\n+ return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)\n+\n+ batch_size = starting_batch_size\n+\n+ def decorator(*args, **kwargs):\n+ nonlocal batch_size\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ params = list(inspect.signature(function).parameters.keys())\n+ # Guard against user error\n+ if len(params) < (len(args) + 1):\n+ arg_str = \", \".join([f\"{arg}={value}\" for arg, value in zip(params[1:], args[1:])])\n+ raise TypeError(\n+ f\"Batch size was passed into `{function.__name__}` as the first argument when called.\"\n+ f\"Remove this as the decorator already does so: `{function.__name__}({arg_str})`\"\n+ )\n+ while True:\n+ if batch_size == 0:\n+ raise RuntimeError(\"No executable batch size found, reached zero.\")\n+ try:\n+ return function(batch_size, *args, **kwargs)\n+ except Exception as e:\n+ if should_reduce_batch_size(e):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ batch_size //= 2\n+ else:\n+ raise\n+\n+ return decorator\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils/operations.py\nsimilarity index 58%\nrename from src/accelerate/utils.py\nrename to src/accelerate/utils/operations.py\nindex 5abbf00b5..b61ae069b 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils/operations.py\n@@ -1,4 +1,4 @@\n-# Copyright 2021 The HuggingFace Team. All rights reserved.\n+# Copyright 2022 The HuggingFace Team. 
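A usage sketch for the decorator defined in `utils/memory.py` above; the training-loop body is a placeholder. The contract visible in the code is that the wrapped function takes `batch_size` as its first argument and is retried with a halved batch size whenever a recognised out-of-memory error is raised.

```py
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Placeholder body: build the dataloaders/model for this batch size and run
    # the training loop. If it raises a recognised out-of-memory error, the
    # decorator runs gc.collect(), empties the CUDA cache, halves `batch_size`
    # (128 -> 64 -> 32 -> ...) and calls this function again.
    print(f"Trying batch size {batch_size}")

train()  # `batch_size` is injected by the decorator, not passed by the caller
```

Note that, as the guard inside the decorator shows, calling `train(64)` with an explicit batch size raises a `TypeError`, since the decorator already supplies that first argument.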
All rights reserved.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -12,172 +12,34 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import functools\n-import importlib\n-import os\n-import random\n-import typing\n-from collections.abc import Mapping\n-from contextlib import contextmanager\n-from dataclasses import dataclass, field\n-from enum import Enum, EnumMeta\n+\"\"\"\n+A set of basic tensor ops compatible with tpu, gpu, and multigpu\n+\"\"\"\n+\n+\n from functools import update_wrapper\n-from typing import Any, Callable, Iterable, List, Optional, Union\n+from typing import Any, Mapping\n \n-import numpy as np\n import torch\n from torch.distributed import ReduceOp\n \n from packaging import version\n \n-from .state import AcceleratorState, DistributedType, is_deepspeed_available, is_tpu_available\n+from ..state import AcceleratorState\n+from .dataclasses import DistributedType, TensorInformation\n+from .imports import is_tpu_available\n \n \n if is_tpu_available():\n import torch_xla.core.xla_model as xm\n \n \n-def is_tensorflow_available():\n- return importlib.util.find_spec(\"tensorflow\") is not None\n-\n-\n-def is_tensorboard_available():\n- return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\n-\n-\n-def is_wandb_available():\n- return importlib.util.find_spec(\"wandb\") is not None\n-\n-\n-def is_comet_ml_available():\n- return importlib.util.find_spec(\"comet_ml\") is not None\n-\n-\n-def is_boto3_available():\n- return importlib.util.find_spec(\"boto3\") is not None\n-\n-\n-def is_sagemaker_available():\n- return importlib.util.find_spec(\"sagemaker\") is not None\n-\n-\n-if is_deepspeed_available():\n- from deepspeed import DeepSpeedEngine\n-\n-SCALER_NAME = \"scaler.pt\"\n-MODEL_NAME = \"pytorch_model\"\n-RNG_STATE_NAME = \"random_states\"\n-OPTIMIZER_NAME = \"optimizer\"\n-SCHEDULER_NAME = \"scheduler\"\n-\n-\n-class EnumWithContains(EnumMeta):\n- \"A metaclass that adds the ability to check if `self` contains an item with the `in` operator\"\n-\n- def __contains__(cls, item):\n- try:\n- cls(item)\n- except ValueError:\n- return False\n- return True\n-\n-\n-class BaseEnum(Enum, metaclass=EnumWithContains):\n- \"An enum class that can get the value of an item with `str(Enum.key)`\"\n-\n- def __str__(self):\n- return self.value\n-\n- @classmethod\n- def list(cls):\n- \"Method to list all the possible items in `cls`\"\n- return list(map(lambda item: str(item), cls))\n-\n-\n-class LoggerType(BaseEnum):\n- ALL = \"all\"\n- TENSORBOARD = \"tensorboard\"\n- WANDB = \"wandb\"\n- COMETML = \"comet_ml\"\n-\n-\n-class PrecisionType(BaseEnum):\n- NO = \"no\"\n- FP16 = \"fp16\"\n- BF16 = \"bf16\"\n-\n-\n-class RNGType(BaseEnum):\n- TORCH = \"torch\"\n- CUDA = \"cuda\"\n- XLA = \"xla\"\n- GENERATOR = \"generator\"\n-\n-\n-@dataclass\n-class TensorInformation:\n- shape: torch.Size\n- dtype: torch.dtype\n-\n+def is_torch_tensor(tensor):\n+ return isinstance(tensor, torch.Tensor)\n \n-def set_seed(seed: int, device_specific: bool = False):\n- \"\"\"\n- Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.\n \n- Args:\n- seed (`int`): The seed to set.\n- device_specific (`bool`, *optional*, defaults to `False`):\n- Whether to differ the seed on each device slightly with `self.process_index`.\n- \"\"\"\n- if device_specific:\n- 
seed += AcceleratorState().process_index\n- random.seed(seed)\n- np.random.seed(seed)\n- torch.manual_seed(seed)\n- torch.cuda.manual_seed_all(seed)\n- # ^^ safe to call this function even if cuda is not available\n- if is_tpu_available():\n- xm.set_rng_state(seed)\n-\n-\n-def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):\n- # Get the proper rng state\n- if rng_type == RNGType.TORCH:\n- rng_state = torch.get_rng_state()\n- elif rng_type == RNGType.CUDA:\n- rng_state = torch.cuda.get_rng_state()\n- elif rng_type == RNGType.XLA:\n- assert is_tpu_available(), \"Can't synchronize XLA seeds on an environment without TPUs.\"\n- rng_state = torch.tensor(xm.get_rng_state())\n- elif rng_type == RNGType.GENERATOR:\n- assert generator is not None, \"Need a generator to synchronize its seed.\"\n- rng_state = generator.get_state()\n-\n- # Broadcast the rng state from device 0 to other devices\n- state = AcceleratorState()\n- if state.distributed_type == DistributedType.TPU:\n- rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n- elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n- rng_state = rng_state.to(state.device)\n- torch.distributed.broadcast(rng_state, 0)\n- rng_state = rng_state.cpu()\n- elif state.distributed_type == DistributedType.MULTI_CPU:\n- torch.distributed.broadcast(rng_state, 0)\n-\n- # Set the broadcast rng state\n- if rng_type == RNGType.TORCH:\n- torch.set_rng_state(rng_state)\n- elif rng_type == RNGType.CUDA:\n- torch.cuda.set_rng_state(rng_state)\n- elif rng_type == RNGType.XLA:\n- xm.set_rng_state(rng_state.item())\n- elif rng_type == RNGType.GENERATOR:\n- generator.set_state(rng_state)\n-\n-\n-def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):\n- for rng_type in rng_types:\n- synchronize_rng_state(RNGType(rng_type), generator=generator)\n+def is_tensor_information(tensor_info):\n+ return isinstance(tensor_info, TensorInformation)\n \n \n def honor_type(obj, generator):\n@@ -191,14 +53,6 @@ def honor_type(obj, generator):\n return type(obj)(*list(generator))\n \n \n-def is_torch_tensor(tensor):\n- return isinstance(tensor, torch.Tensor)\n-\n-\n-def is_tensor_information(tensor_info):\n- return isinstance(tensor_info, TensorInformation)\n-\n-\n def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):\n \"\"\"\n Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.\n@@ -305,73 +159,24 @@ def _initialize_tensor(tensor_info):\n return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)\n \n \n-def convert_to_fp32(tensor):\n- \"\"\"\n- Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.\n-\n- Args:\n- tensor (nested list/tuple/dictionary of `torch.Tensor`):\n- The data to convert from FP16/BF16 to FP32.\n-\n- Returns:\n- The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.\n- \"\"\"\n-\n- def _convert_to_fp32(tensor):\n- return tensor.float()\n-\n- def _is_fp16_bf16_tensor(tensor):\n- return hasattr(tensor, \"dtype\") and (\n- tensor.dtype == torch.float16\n- or (version.parse(torch.__version__) >= version.parse(\"1.10\") and tensor.dtype == torch.bfloat16)\n- )\n-\n- return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\n-\n-\n-class 
ConvertOutputsToFp32:\n- \"\"\"\n- Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\n- precision will be convert back to FP32.\n-\n- Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\n-\n- Args:\n- model_forward (`Callable`):\n- The function which outputs we want to treat.\n-\n- Returns:\n- The same function as `model_forward` but with converted outputs.\n- \"\"\"\n-\n- def __init__(self, model_forward):\n- self.model_forward = model_forward\n- update_wrapper(self, model_forward)\n-\n- def __call__(self, *args, **kwargs):\n- return convert_to_fp32(self.model_forward(*args, **kwargs))\n-\n-\n-convert_outputs_to_fp32 = ConvertOutputsToFp32\n-\n-\n-def extract_model_from_parallel(model):\n+def find_batch_size(data):\n \"\"\"\n- Extract a model from its distributed containers.\n+ Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.\n \n Args:\n- model (`torch.nn.Module`): The model to extract.\n+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.\n \n Returns:\n- `torch.nn.Module`: The extracted model.\n+ `int`: The batch size.\n \"\"\"\n- options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n- if is_deepspeed_available():\n- options += (DeepSpeedEngine,)\n-\n- while isinstance(model, options):\n- model = model.module\n- return model\n+ if isinstance(data, (tuple, list)):\n+ return find_batch_size(data[0])\n+ elif isinstance(data, Mapping):\n+ for k in data.keys():\n+ return find_batch_size(data[k])\n+ elif not isinstance(data, torch.Tensor):\n+ raise TypeError(f\"Can only find the batch size of tensors but got {type(data)}.\")\n+ return data.shape[0]\n \n \n def _tpu_gather(tensor, name=\"gather tensor\"):\n@@ -536,26 +341,6 @@ def _slice_tensor(tensor, tensor_slice):\n return recursively_apply(_slice_tensor, data, tensor_slice)\n \n \n-def find_batch_size(data):\n- \"\"\"\n- Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.\n-\n- Args:\n- data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.\n-\n- Returns:\n- `int`: The batch size.\n- \"\"\"\n- if isinstance(data, (tuple, list)):\n- return find_batch_size(data[0])\n- elif isinstance(data, Mapping):\n- for k in data.keys():\n- return find_batch_size(data[k])\n- elif not isinstance(data, torch.Tensor):\n- raise TypeError(f\"Can only find the batch size of tensors but got {type(data)}.\")\n- return data.shape[0]\n-\n-\n def concatenate(data, dim=0):\n \"\"\"\n Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.\n@@ -657,198 +442,51 @@ def _reduce_across_processes(tensor, reduction=\"mean\"):\n return recursively_apply(_reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction)\n \n \n-def wait_for_everyone():\n- \"\"\"\n- Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n-\n- <Tip warning={true}>\n-\n- Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n-\n- </Tip>\n- \"\"\"\n- if (\n- AcceleratorState().distributed_type == DistributedType.MULTI_GPU\n- or AcceleratorState().distributed_type == DistributedType.MULTI_CPU\n- or AcceleratorState().distributed_type == DistributedType.DEEPSPEED\n- ):\n- torch.distributed.barrier()\n- elif 
AcceleratorState().distributed_type == DistributedType.TPU:\n- xm.rendezvous(\"accelerate.utils.wait_for_everyone\")\n-\n-\n-def save(obj, f):\n+def convert_to_fp32(tensor):\n \"\"\"\n- Save the data to disk. Use in place of `torch.save()`.\n+ Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.\n \n Args:\n- obj: The data to save\n- f: The file (or file-like object) to use to save the data\n- \"\"\"\n- if AcceleratorState().distributed_type == DistributedType.TPU:\n- xm.save(obj, f)\n- elif AcceleratorState().local_process_index == 0:\n- torch.save(obj, f)\n-\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n+ The data to convert from FP16/BF16 to FP32.\n \n-class PrepareForLaunch:\n+ Returns:\n+ The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.\n \"\"\"\n- Prepare a function that will launched in a distributed setup.\n-\n- Args:\n- launcher (`Callable`):\n- The function to launch.\n- distributed_type ([`~state.DistributedType`]):\n- The distributed type to prepare for.\n- debug (`bool`, *optional*, defaults to `False`):\n- Whether or not this is a debug launch.\n- \"\"\"\n-\n- def __init__(self, launcher, distributed_type=\"NO\", debug=False):\n- self.launcher = launcher\n- self.distributed_type = DistributedType(distributed_type)\n- self.debug = debug\n-\n- def __call__(self, index, *args):\n- if self.debug:\n- world_size = int(os.environ.get(\"WORLD_SIZE\"))\n- rdv_file = os.environ.get(\"ACCELERATE_DEBUG_RDV_FILE\")\n- torch.distributed.init_process_group(\n- \"gloo\",\n- rank=index,\n- store=torch.distributed.FileStore(rdv_file, world_size),\n- world_size=world_size,\n- )\n- elif self.distributed_type == DistributedType.MULTI_GPU or self.distributed_type == DistributedType.MULTI_CPU:\n- # Prepare the environment for torch.distributed\n- os.environ[\"LOCAL_RANK\"] = str(index)\n- os.environ[\"RANK\"] = str(index)\n-\n- self.launcher(*args)\n-\n-\n-@dataclass\n-class DeepSpeedPlugin:\n \n- gradient_accumulation_steps: int = field(\n- default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n- )\n- zero_stage: int = field(\n- default=None,\n- metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n- )\n- is_train_batch_min: str = field(\n- default=True,\n- metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n- )\n-\n- auto_opt_mapping: bool = field(\n- default=True,\n- metadata={\"help\": \"whether to map torch.adam to deepspeed optimizer version of adam based on config\"},\n- )\n-\n- offload_optimizer_device: bool = field(default=None, metadata={\"help\": \"Possible options are none|cpu|nvme\"})\n-\n- def __post_init__(self):\n-\n- if self.gradient_accumulation_steps is None:\n- self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n-\n- if self.zero_stage is None:\n- self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+ def _convert_to_fp32(tensor):\n+ return tensor.float()\n \n- if self.offload_optimizer_device is None:\n- self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+ def _is_fp16_bf16_tensor(tensor):\n+ return hasattr(tensor, \"dtype\") and (\n+ tensor.dtype == torch.float16\n+ or (version.parse(torch.__version__) >= version.parse(\"1.10\") and tensor.dtype == torch.bfloat16)\n+ )\n \n- 
self.deepspeed_config = {\n- \"train_batch_size\": None,\n- \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n- \"zero_optimization\": {\n- \"stage\": self.zero_stage,\n- \"offload_optimizer\": {\n- \"device\": self.offload_optimizer_device,\n- },\n- },\n- \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n- \"zero_allow_untested_optimizer\": True,\n- }\n+ return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\n \n \n-@dataclass\n-class FullyShardedDataParallelPlugin:\n- \"\"\"\n- This plugin is used to enable fully sharded data parallelism.\n+class ConvertOutputsToFp32:\n \"\"\"\n+ Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\n+ precision will be convert back to FP32.\n \n- sharding_strategy: \"typing.Any\" = field(\n- default=None,\n- metadata={\"help\": \"Possible options are [1] FULL_SHARD, [2] SHARD_GRAD_OP\"},\n- )\n- backward_prefetch: \"typing.Any\" = field(\n- default=None,\n- metadata={\"help\": \"Possible options are [1] BACKWARD_PRE, [2] BACKWARD_POST\"},\n- )\n- auto_wrap_policy: \"typing.Any\" = field(\n- default=None,\n- metadata={\"help\": \"A callable specifying a policy to recursively wrap layers with FSDP\"},\n- )\n- cpu_offload: Optional[Callable] = field(\n- default=None,\n- metadata={\"help\": \"Decides Whether to offload parameters and gradients to CPU.\"},\n- )\n- min_num_params: int = field(\n- default=None, metadata={\"help\": \"FSDP's minimum number of parameters for Default Auto Wrapping.\"}\n- )\n- ignored_modules: Optional[Iterable[torch.nn.Module]] = field(\n- default=None,\n- metadata={\"help\": \"A list of modules to ignore for FSDP.\"},\n- )\n-\n- def __post_init__(self):\n- from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, ShardingStrategy\n- from torch.distributed.fsdp.wrap import default_auto_wrap_policy\n-\n- if self.sharding_strategy is None:\n- self.sharding_strategy = ShardingStrategy(int(os.environ.get(\"FSDP_SHARDING_STRATEGY\", 1)))\n-\n- if self.cpu_offload is None:\n- if os.environ.get(\"FSDP_OFFLOAD_PARAMS\", \"false\") == \"true\":\n- self.cpu_offload = CPUOffload(offload_params=True)\n- else:\n- self.cpu_offload = CPUOffload(offload_params=False)\n-\n- if self.min_num_params is None:\n- self.min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n-\n- if self.auto_wrap_policy is None:\n- if self.min_num_params > 0:\n- self.auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=self.min_num_params)\n-\n+ Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\n \n-@contextmanager\n-def patch_environment(**kwargs):\n- \"\"\"\n- A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.\n+ Args:\n+ model_forward (`Callable`):\n+ The function which outputs we want to treat.\n \n- Will convert the values in `kwargs` to strings and upper-case all the keys.\n+ Returns:\n+ The same function as `model_forward` but with converted outputs.\n \"\"\"\n- for key, value in kwargs.items():\n- os.environ[key.upper()] = str(value)\n \n- yield\n+ def __init__(self, model_forward):\n+ self.model_forward = model_forward\n+ update_wrapper(self, model_forward)\n \n- for key in kwargs:\n- del os.environ[key.upper()]\n+ def __call__(self, *args, **kwargs):\n+ return convert_to_fp32(self.model_forward(*args, **kwargs))\n \n \n-def get_pretty_name(obj):\n- 
\"\"\"\n- Gets a pretty name from `obj`.\n- \"\"\"\n- if not hasattr(obj, \"__qualname__\") and not hasattr(obj, \"__name__\"):\n- obj = getattr(obj, \"__class__\", obj)\n- if hasattr(obj, \"__qualname__\"):\n- return obj.__qualname__\n- if hasattr(obj, \"__name__\"):\n- return obj.__name__\n- return str(obj)\n+convert_outputs_to_fp32 = ConvertOutputsToFp32\ndiff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\nnew file mode 100644\nindex 000000000..35c96a21a\n--- /dev/null\n+++ b/src/accelerate/utils/other.py\n@@ -0,0 +1,111 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from contextlib import contextmanager\n+\n+import torch\n+\n+from ..state import AcceleratorState\n+from .dataclasses import DistributedType\n+from .imports import is_deepspeed_available, is_tpu_available\n+\n+\n+if is_deepspeed_available():\n+ from deepspeed import DeepSpeedEngine\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+\n+def extract_model_from_parallel(model):\n+ \"\"\"\n+ Extract a model from its distributed containers.\n+\n+ Args:\n+ model (`torch.nn.Module`): The model to extract.\n+\n+ Returns:\n+ `torch.nn.Module`: The extracted model.\n+ \"\"\"\n+ options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n+ if is_deepspeed_available():\n+ options += (DeepSpeedEngine,)\n+\n+ while isinstance(model, options):\n+ model = model.module\n+ return model\n+\n+\n+def wait_for_everyone():\n+ \"\"\"\n+ Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n+\n+ <Tip warning={true}>\n+\n+ Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n+\n+ </Tip>\n+ \"\"\"\n+ if (\n+ AcceleratorState().distributed_type == DistributedType.MULTI_GPU\n+ or AcceleratorState().distributed_type == DistributedType.MULTI_CPU\n+ or AcceleratorState().distributed_type == DistributedType.DEEPSPEED\n+ ):\n+ torch.distributed.barrier()\n+ elif AcceleratorState().distributed_type == DistributedType.TPU:\n+ xm.rendezvous(\"accelerate.utils.wait_for_everyone\")\n+\n+\n+def save(obj, f):\n+ \"\"\"\n+ Save the data to disk. 
Use in place of `torch.save()`.\n+\n+ Args:\n+ obj: The data to save\n+ f: The file (or file-like object) to use to save the data\n+ \"\"\"\n+ if AcceleratorState().distributed_type == DistributedType.TPU:\n+ xm.save(obj, f)\n+ elif AcceleratorState().local_process_index == 0:\n+ torch.save(obj, f)\n+\n+\n+@contextmanager\n+def patch_environment(**kwargs):\n+ \"\"\"\n+ A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.\n+\n+ Will convert the values in `kwargs` to strings and upper-case all the keys.\n+ \"\"\"\n+ for key, value in kwargs.items():\n+ os.environ[key.upper()] = str(value)\n+\n+ yield\n+\n+ for key in kwargs:\n+ del os.environ[key.upper()]\n+\n+\n+def get_pretty_name(obj):\n+ \"\"\"\n+ Gets a pretty name from `obj`.\n+ \"\"\"\n+ if not hasattr(obj, \"__qualname__\") and not hasattr(obj, \"__name__\"):\n+ obj = getattr(obj, \"__class__\", obj)\n+ if hasattr(obj, \"__qualname__\"):\n+ return obj.__qualname__\n+ if hasattr(obj, \"__name__\"):\n+ return obj.__name__\n+ return str(obj)\ndiff --git a/src/accelerate/utils/random.py b/src/accelerate/utils/random.py\nnew file mode 100644\nindex 000000000..3958022a4\n--- /dev/null\n+++ b/src/accelerate/utils/random.py\n@@ -0,0 +1,87 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import random\n+from typing import List, Optional, Union\n+\n+import numpy as np\n+import torch\n+\n+from ..state import AcceleratorState\n+from .dataclasses import DistributedType, RNGType\n+from .imports import is_tpu_available\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+\n+def set_seed(seed: int, device_specific: bool = False):\n+ \"\"\"\n+ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.\n+\n+ Args:\n+ seed (`int`): The seed to set.\n+ device_specific (`bool`, *optional*, defaults to `False`):\n+ Whether to differ the seed on each device slightly with `self.process_index`.\n+ \"\"\"\n+ if device_specific:\n+ seed += AcceleratorState().process_index\n+ random.seed(seed)\n+ np.random.seed(seed)\n+ torch.manual_seed(seed)\n+ torch.cuda.manual_seed_all(seed)\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ xm.set_rng_state(seed)\n+\n+\n+def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):\n+ # Get the proper rng state\n+ if rng_type == RNGType.TORCH:\n+ rng_state = torch.get_rng_state()\n+ elif rng_type == RNGType.CUDA:\n+ rng_state = torch.cuda.get_rng_state()\n+ elif rng_type == RNGType.XLA:\n+ assert is_tpu_available(), \"Can't synchronize XLA seeds on an environment without TPUs.\"\n+ rng_state = torch.tensor(xm.get_rng_state())\n+ elif rng_type == RNGType.GENERATOR:\n+ assert generator is not None, \"Need a generator to synchronize its seed.\"\n+ rng_state = generator.get_state()\n+\n+ # Broadcast the rng state from device 0 to other devices\n+ state = 
AcceleratorState()\n+ if state.distributed_type == DistributedType.TPU:\n+ rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n+ elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n+ rng_state = rng_state.to(state.device)\n+ torch.distributed.broadcast(rng_state, 0)\n+ rng_state = rng_state.cpu()\n+ elif state.distributed_type == DistributedType.MULTI_CPU:\n+ torch.distributed.broadcast(rng_state, 0)\n+\n+ # Set the broadcast rng state\n+ if rng_type == RNGType.TORCH:\n+ torch.set_rng_state(rng_state)\n+ elif rng_type == RNGType.CUDA:\n+ torch.cuda.set_rng_state(rng_state)\n+ elif rng_type == RNGType.XLA:\n+ xm.set_rng_state(rng_state.item())\n+ elif rng_type == RNGType.GENERATOR:\n+ generator.set_state(rng_state)\n+\n+\n+def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):\n+ for rng_type in rng_types:\n+ synchronize_rng_state(RNGType(rng_type), generator=generator)\ndiff --git a/tests/test_kwargs_handlers.py b/tests/test_kwargs_handlers.py\nindex aaf15de0c..8b438d1c6 100644\n--- a/tests/test_kwargs_handlers.py\n+++ b/tests/test_kwargs_handlers.py\n@@ -21,8 +21,8 @@\n import torch\n \n from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs\n-from accelerate.kwargs_handlers import KwargsHandler\n from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu\n+from accelerate.utils import KwargsHandler\n \n \n @dataclass\ndiff --git a/tests/test_memory_utils.py b/tests/test_memory_utils.py\nindex 42fa151d0..df125ea95 100644\n--- a/tests/test_memory_utils.py\n+++ b/tests/test_memory_utils.py\n@@ -14,7 +14,7 @@\n \n import unittest\n \n-from accelerate.memory_utils import find_executable_batch_size\n+from accelerate.utils.memory import find_executable_batch_size\n \n \n def raise_fake_out_of_memory():\ndiff --git a/tests/test_sagemaker.py b/tests/test_sagemaker.py\nindex f2d587a39..2824493d6 100644\n--- a/tests/test_sagemaker.py\n+++ b/tests/test_sagemaker.py\n@@ -4,7 +4,7 @@\n import pytest\n from accelerate.commands.config.config_args import SageMakerConfig\n from accelerate.commands.launch import _convert_nargs_to_dict\n-from accelerate.state import ComputeEnvironment\n+from accelerate.utils import ComputeEnvironment\n \n \n @dataclass\n", "code_comments": [ { "body": "This one is rather important -> let's put it in the main init as well so that users import it directly from `accelerate` (and this way they'll be immune to the next cleanup)", "diff_hunk": "@@ -19,7 +19,7 @@\n from accelerate import Accelerator, DistributedType\n \n # New Code #\n-from accelerate.memory_utils import find_executable_batch_size\n+from accelerate.utils import find_executable_batch_size", "from_author": false }, { "body": "So as said above, let's add the batch size finder here.", "diff_hunk": "@@ -5,7 +5,12 @@\n __version__ = \"0.8.0.dev0\"\n \n from .accelerator import Accelerator\n-from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs\n from .launchers import debug_launcher, notebook_launcher\n-from .state import DistributedType\n-from .utils import DeepSpeedPlugin, synchronize_rng_states\n+from .utils import (\n+ DeepSpeedPlugin,\n+ DistributedDataParallelKwargs,\n+ DistributedType,\n+ GradScalerKwargs,\n+ InitProcessGroupKwargs,\n+ synchronize_rng_states,", "from_author": false }, { "body": "```suggestion\r\n \"memory_utils has been reorganized to utils.memory. 
Import `find_executable_batchsize` from the main `__init__`: \"\r\n \"`from accelerate import find_executable_batch_size` to avoid this warning.\",\r\n```", "diff_hunk": "@@ -0,0 +1,28 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# flake8: noqa\n+# There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n+# module, but to preserve other warnings. So, don't check this module at all\n+\n+\n+import warnings\n+\n+\n+warnings.warn(\n+ \"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: `from accelerate import find_executable_batch_size` to avoid this\",", "from_author": false }, { "body": "```suggestion\r\nfrom .utils.memory import find_executable_batch_size\r\n```", "diff_hunk": "@@ -0,0 +1,28 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# flake8: noqa\n+# There's no way to ignore \"F401 '...' imported but unused\" warnings in this\n+# module, but to preserve other warnings. So, don't check this module at all\n+\n+\n+import warnings\n+\n+\n+warnings.warn(\n+ \"memory_utils has been reorganized to utils.memory. 
Import `find_executable_batchsize` from the main `__init__`: `from accelerate import find_executable_batch_size` to avoid this\",\n+ FutureWarning,\n+)\n+\n+from accelerate import memory_utils", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/340", "pr_id": 928019348 }, { "diff": "diff --git a/docs/source/index.mdx b/docs/source/index.mdx\nindex c41ae4a0e..9d9000097 100644\n--- a/docs/source/index.mdx\n+++ b/docs/source/index.mdx\n@@ -52,7 +52,7 @@ Changing it to work with accelerate is really easy and only adds a few lines of\n + device = accelerator.device\n my_model.to(device)\n # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n-+ my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n++ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(\n + my_model, my_optimizer, my_training_dataloader\n + )\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "It's the least I can do, thank's for merging.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/339", "pr_id": 924846504 }, { "diff": "diff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex 170211e5e..19f8e9dea 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -13,7 +13,6 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n import argparse\n-import logging\n import os\n \n import torch\n@@ -30,9 +29,6 @@\n )\n \n \n-logger = logging.getLogger(__name__)\n-\n-\n ########################################################################\n # This is a fully working simple example to use Accelerate,\n # specifically showcasing the experiment tracking capability,\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex a903386aa..7f1480436 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -26,6 +26,7 @@\n from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state\n from .data_loader import prepare_data_loader\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler\n+from .logging import get_logger\n from .optimizer import AcceleratedOptimizer\n from .scheduler import AcceleratedScheduler\n from .state import AcceleratorState, DistributedType, is_deepspeed_available\n@@ -52,10 +53,7 @@\n \n from .deepspeed_utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper\n \n-import logging\n-\n-\n-logger = logging.getLogger(__name__)\n+logger = get_logger(__name__)\n \n \n class Accelerator:\ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex b35bec237..a94067144 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -28,10 +28,10 @@\n if is_tpu_available():\n import torch_xla.core.xla_model as xm\n \n-import logging\n+from .logging import get_logger\n \n \n-logger = logging.getLogger(__name__)\n+logger = get_logger(__name__)\n \n \n def save_accelerator_state(\ndiff --git a/src/accelerate/logging.py b/src/accelerate/logging.py\nnew file mode 100644\nindex 000000000..2128cdca7\n--- /dev/null\n+++ 
b/src/accelerate/logging.py\n@@ -0,0 +1,63 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import logging\n+\n+from .state import AcceleratorState\n+\n+\n+class MultiProcessAdapter(logging.LoggerAdapter):\n+ \"\"\"\n+ An adapter to assist with logging in multiprocess.\n+\n+ `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes\n+ or only the main executed one. Default is `main_process_only=True`.\n+ \"\"\"\n+\n+ @staticmethod\n+ def _should_log(main_process_only):\n+ \"Check if log should be performed\"\n+ return not main_process_only or (main_process_only and AcceleratorState().local_process_index == 0)\n+\n+ def log(self, level, msg, *args, **kwargs):\n+ \"\"\"\n+ Delegates logger call after checking if we should log.\n+\n+ Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes\n+ or only the main executed one. Default is `True` if not passed\n+ \"\"\"\n+ main_process_only = kwargs.pop(\"main_process_only\", True)\n+ if self.isEnabledFor(level) and self._should_log(main_process_only):\n+ msg, kwargs = self.process(msg, kwargs)\n+ self.logger.log(level, msg, *args, **kwargs)\n+\n+\n+def get_logger(name: str):\n+ \"\"\"\n+ Returns a `logging.Logger` for `name` that can handle multiprocessing.\n+\n+ If a log should be called on all processes, pass `main_process_only=False`\n+\n+ E.g.\n+ ```python\n+ logger.info(\"My log\", main_process_only=False)\n+ logger.debug(\"My log\", main_process_only=False)\n+ ```\n+\n+ Args:\n+ name (`str`):\n+ The name for the logger, such as `__file__`\n+ \"\"\"\n+ logger = logging.getLogger(name)\n+ return MultiProcessAdapter(logger, {})\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex de3a19310..218ddf2df 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -15,11 +15,11 @@\n # Expectation:\n # Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n \n-import logging\n import os\n from abc import ABCMeta, abstractmethod, abstractproperty\n from typing import List, Optional, Union\n \n+from .logging import get_logger\n from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n \n \n@@ -41,7 +41,7 @@\n _available_trackers.append(LoggerType.COMETML)\n \n \n-logger = logging.getLogger(__name__)\n+logger = get_logger(__name__)\n \n \n def get_available_trackers():\n", "code_comments": [ { "body": "Nice!", "diff_hunk": "@@ -0,0 +1,60 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import logging\n+\n+from .state import AcceleratorState\n+\n+\n+class MultiProcessAdapter(logging.LoggerAdapter):\n+ \"\"\"\n+ An adapter to assist with logging in multiprocess.\n+\n+ `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes\n+ or only the main executed one. Default is `main_process_only=True`.\n+ \"\"\"\n+\n+ @staticmethod\n+ def _should_log(main_process_only):\n+ \"Check if log should be performed\"\n+ return not main_process_only or (main_process_only and AcceleratorState().local_process_index == 0)\n+\n+ def log(self, level, msg, *args, **kwargs):\n+ \"\"\"\n+ Delegates logger call after checking if we should log\n+ \"\"\"\n+ main_process_only = kwargs.pop(\"main_process_only\", True)", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/337", "pr_id": 924147533 }, { "diff": "diff --git a/examples/by_feature/fsdp_with_peak_mem_tracking.py b/examples/by_feature/fsdp_with_peak_mem_tracking.py\nnew file mode 100644\nindex 000000000..e17e9ff67\n--- /dev/null\n+++ b/examples/by_feature/fsdp_with_peak_mem_tracking.py\n@@ -0,0 +1,369 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import gc\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+# - FSDP\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+# New Code #\n+# Converting Bytes to Megabytes\n+def b2mb(x):\n+ return int(x / 2**20)\n+\n+\n+# New Code #\n+# This context manager is used to track the peak memory usage of the process\n+class TorchTracemalloc:\n+ def __enter__(self):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero\n+ self.begin = torch.cuda.memory_allocated()\n+ return self\n+\n+ def __exit__(self, *exc):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ self.end = torch.cuda.memory_allocated()\n+ self.peak = torch.cuda.max_memory_allocated()\n+ self.used = b2mb(self.end - self.begin)\n+ self.peaked = b2mb(self.peak - self.begin)\n+ # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"wandb\", logging_dir=args.logging_dir\n+ )\n+ else:\n+ accelerator = Accelerator()\n+ accelerator.print(accelerator.distributed_type)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. 
`{args.checkpointing_steps}` passed.\"\n+ )\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.print(run)\n+ accelerator.init_trackers(run, config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True)\n+ # New Code #\n+ # For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer\n+ model = accelerator.prepare(model)\n+\n+ # Instantiate optimizer\n+ # New Code #\n+ # For FSDP feature, at present it doesn't support multiple parameter groups,\n+ # so we need to create a single parameter group for the whole model\n+ optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr, weight_decay=2e-4)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=10,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # New Code #\n+ # For FSDP feature, prepare everything except the model as we have already prepared the model\n+ # before creating the optimizer\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to 
the\n+ # prepare method.\n+ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ overall_step = 0\n+\n+ # Potentially load in the weights and states from a previous save\n+ if args.resume_from_checkpoint:\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ path = os.path.basename(args.resume_from_checkpoint)\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ # Extract `epoch_{i}` or `step_{i}`\n+ training_difference = os.path.splitext(path)[0]\n+\n+ if \"epoch\" in training_difference:\n+ num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n+ resume_step = None\n+ else:\n+ resume_step = int(training_difference.replace(\"step_\", \"\"))\n+ num_epochs -= resume_step // len(train_dataloader)\n+ # If resuming by step, we also need to know exactly how far into the DataLoader we went\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ # New Code #\n+ # context manager to track the peak memory usage during the training epoch\n+ with TorchTracemalloc() as tracemalloc:\n+ model.train()\n+ if args.with_tracking:\n+ total_loss = 0\n+ for step, batch in enumerate(train_dataloader):\n+ # We need to skip steps until we reach the resumed step\n+ if args.resume_from_checkpoint and epoch == 0:\n+ if resume_step is not None and step < resume_step:\n+ pass\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ # We keep track of the loss at each epoch\n+ if args.with_tracking:\n+ total_loss += loss.detach().float()\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+ # accelerator.print(lr_scheduler.get_lr())\n+\n+ overall_step += 1\n+\n+ if isinstance(checkpointing_steps, int):\n+ output_dir = f\"step_{overall_step}\"\n+ if overall_step % checkpointing_steps == 0:\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+ # New Code #\n+ # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage\n+ accelerator.print(\"Memory before entering the train : {}\".format(b2mb(tracemalloc.begin)))\n+ accelerator.print(\"Memory consumed at the end of the train (end-begin): {}\".format(tracemalloc.used))\n+ accelerator.print(\"Peak Memory consumed during the train (max-begin): {}\".format(tracemalloc.peaked))\n+ accelerator.print(\n+ \"Total Peak Memory consumed during the train (max): {}\".format(\n+ tracemalloc.peaked + b2mb(tracemalloc.begin)\n+ )\n+ )\n+ # Logging the peak memory usage of the GPU to the tracker\n+ accelerator.log(\n+ {\n+ \"train_total_peak_memory\": tracemalloc.peaked + b2mb(tracemalloc.begin),\n+ },\n+ step=epoch,\n+ )\n+\n+ # New Code #\n+ # context manager to track the peak memory usage during the evaluation\n+ with TorchTracemalloc() as tracemalloc:\n+ model.eval()\n+ for 
step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"accuracy\": eval_metric[\"accuracy\"],\n+ \"f1\": eval_metric[\"f1\"],\n+ \"train_loss\": total_loss,\n+ },\n+ step=epoch,\n+ )\n+\n+ if checkpointing_steps == \"epoch\":\n+ output_dir = f\"epoch_{epoch}\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+ # New Code #\n+ # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage\n+ accelerator.print(\"Memory before entering the eval : {}\".format(b2mb(tracemalloc.begin)))\n+ accelerator.print(\"Memory consumed at the end of the eval (end-begin): {}\".format(tracemalloc.used))\n+ accelerator.print(\"Peak Memory consumed during the eval (max-begin): {}\".format(tracemalloc.peaked))\n+ accelerator.print(\n+ \"Total Peak Memory consumed during the eval (max): {}\".format(tracemalloc.peaked + b2mb(tracemalloc.begin))\n+ )\n+ # Logging the peak memory usage of the GPU to the tracker\n+ accelerator.log(\n+ {\n+ \"eval_total_peak_memory\": tracemalloc.peaked + b2mb(tracemalloc.begin),\n+ },\n+ step=epoch,\n+ )\n+\n+ if args.with_tracking:\n+ accelerator.end_training()\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ action=\"store_true\",\n+ help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n+ )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.\",\n+ )\n+ parser.add_argument(\n+ \"--logging_dir\",\n+ type=str,\n+ default=\"logs\",\n+ help=\"Location on where to store experiment tracking logs`\",\n+ )\n+ parser.add_argument(\n+ \"--model_name_or_path\",\n+ type=str,\n+ help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n+ required=True,\n+ )\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"seed\": 1, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 919667fc5..84b6adaef 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -40,7 +40,7 @@\n # Should mock `{script_name}.get_dataloaders` via:\n # @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n \n-EXCLUDE_EXAMPLES = [\"cross_validation.py\", \"multi_process_metrics.py\", \"memory.py\"]\n+EXCLUDE_EXAMPLES = [\"cross_validation.py\", \"multi_process_metrics.py\", \"memory.py\", \"fsdp_with_peak_mem_tracking.py\"]\n \n \n def mocked_dataloaders(accelerator, batch_size: int = 16):\n", "code_comments": [ { "body": "This should be uncommented no? Also we can remove the print statement.", "diff_hunk": "@@ -0,0 +1,378 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import gc\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+# - FSDP\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+# New Code #\n+# Converting Bytes to Megabytes\n+def b2mb(x):\n+ return int(x / 2**20)\n+\n+\n+# New Code #\n+# This context manager is used to track the peak memory usage of the process\n+class TorchTracemalloc:\n+ def __enter__(self):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero\n+ self.begin = torch.cuda.memory_allocated()\n+ return self\n+\n+ def __exit__(self, *exc):\n+ 
gc.collect()\n+ torch.cuda.empty_cache()\n+ self.end = torch.cuda.memory_allocated()\n+ self.peak = torch.cuda.max_memory_allocated()\n+ self.used = b2mb(self.end - self.begin)\n+ self.peaked = b2mb(self.peak - self.begin)\n+ # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"wandb\", logging_dir=args.logging_dir\n+ )\n+ else:\n+ accelerator = Accelerator()\n+ accelerator.print(accelerator.distributed_type)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n+ )\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.print(run)\n+ accelerator.init_trackers(run, config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = 
AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True)\n+ # model = accelerator.prepare(model) #for FSDP model needs to be prepared before the optimizer\n+ # accelerator.print(model)", "from_author": false }, { "body": "Same, we should show the new code here?", "diff_hunk": "@@ -0,0 +1,378 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import gc\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+# - FSDP\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+# New Code #\n+# Converting Bytes to Megabytes\n+def b2mb(x):\n+ return int(x / 2**20)\n+\n+\n+# New Code #\n+# This context manager is used to track the peak memory usage of the process\n+class TorchTracemalloc:\n+ def __enter__(self):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero\n+ self.begin = torch.cuda.memory_allocated()\n+ return self\n+\n+ def __exit__(self, *exc):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ self.end = torch.cuda.memory_allocated()\n+ self.peak = torch.cuda.max_memory_allocated()\n+ self.used = b2mb(self.end - self.begin)\n+ self.peaked = b2mb(self.peak - self.begin)\n+ # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"wandb\", logging_dir=args.logging_dir\n+ )\n+ else:\n+ accelerator = Accelerator()\n+ accelerator.print(accelerator.distributed_type)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or 
`epoch`. `{args.checkpointing_steps}` passed.\"\n+ )\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.print(run)\n+ accelerator.init_trackers(run, config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True)\n+ # model = accelerator.prepare(model) #for FSDP model needs to be prepared before the optimizer\n+ # accelerator.print(model)\n+\n+ # Instantiate optimizer\n+ no_decay = [\"bias\", \"LayerNorm.weight\"]\n+ optimizer_grouped_parameters = [\n+ {\n+ \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n+ \"weight_decay\": 2e-4,\n+ },\n+ {\n+ \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n+ \"weight_decay\": 0.0,\n+ },\n+ ]\n+ # optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n+ optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=lr)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=10,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific 
order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+ # optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ # optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ # )", "from_author": false }, { "body": "I thought it was more on showing how FSDP reduces memory usage. We can have this to show the proper usage for FSDP, that should be better. Should I rename the file also? ", "diff_hunk": "@@ -0,0 +1,378 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import gc\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+# - FSDP\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+# New Code #\n+# Converting Bytes to Megabytes\n+def b2mb(x):\n+ return int(x / 2**20)\n+\n+\n+# New Code #\n+# This context manager is used to track the peak memory usage of the process\n+class TorchTracemalloc:\n+ def __enter__(self):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero\n+ self.begin = torch.cuda.memory_allocated()\n+ return self\n+\n+ def __exit__(self, *exc):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ self.end = torch.cuda.memory_allocated()\n+ self.peak = torch.cuda.max_memory_allocated()\n+ self.used = b2mb(self.end - self.begin)\n+ self.peaked = b2mb(self.peak - self.begin)\n+ # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"wandb\", logging_dir=args.logging_dir\n+ )\n+ else:\n+ accelerator = Accelerator()\n+ 
accelerator.print(accelerator.distributed_type)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n+ )\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.print(run)\n+ accelerator.init_trackers(run, config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True)\n+ # model = accelerator.prepare(model) #for FSDP model needs to be prepared before the optimizer\n+ # accelerator.print(model)", "from_author": true }, { "body": "Both should be nice to have in the same example I think. And yes something that clearly says fsdp would be better!", "diff_hunk": "@@ -0,0 +1,378 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import gc\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+# - FSDP\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+# New Code #\n+# Converting Bytes to Megabytes\n+def b2mb(x):\n+ return int(x / 2**20)\n+\n+\n+# New Code #\n+# This context manager is used to track the peak memory usage of the process\n+class TorchTracemalloc:\n+ def __enter__(self):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero\n+ self.begin = torch.cuda.memory_allocated()\n+ return self\n+\n+ def __exit__(self, *exc):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ self.end = torch.cuda.memory_allocated()\n+ self.peak = torch.cuda.max_memory_allocated()\n+ self.used = b2mb(self.end - self.begin)\n+ self.peaked = b2mb(self.peak - self.begin)\n+ # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"wandb\", logging_dir=args.logging_dir\n+ )\n+ else:\n+ accelerator = Accelerator()\n+ accelerator.print(accelerator.distributed_type)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. 
`{args.checkpointing_steps}` passed.\"\n+ )\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.print(run)\n+ accelerator.init_trackers(run, config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True)\n+ # model = accelerator.prepare(model) #for FSDP model needs to be prepared before the optimizer\n+ # accelerator.print(model)", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -0,0 +1,378 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import gc\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+# - FSDP\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+# New Code #\n+# Converting Bytes to Megabytes\n+def b2mb(x):\n+ return int(x / 2**20)\n+\n+\n+# New Code #\n+# This context manager is used to track the peak memory usage of the process\n+class TorchTracemalloc:\n+ def __enter__(self):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero\n+ self.begin = torch.cuda.memory_allocated()\n+ return self\n+\n+ def __exit__(self, *exc):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ self.end = torch.cuda.memory_allocated()\n+ self.peak = torch.cuda.max_memory_allocated()\n+ self.used = b2mb(self.end - self.begin)\n+ self.peaked = b2mb(self.peak - self.begin)\n+ # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"wandb\", logging_dir=args.logging_dir\n+ )\n+ else:\n+ accelerator = Accelerator()\n+ accelerator.print(accelerator.distributed_type)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. 
`{args.checkpointing_steps}` passed.\"\n+ )\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.print(run)\n+ accelerator.init_trackers(run, config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True)\n+ # model = accelerator.prepare(model) #for FSDP model needs to be prepared before the optimizer\n+ # accelerator.print(model)", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Since this is more than a \"one-feature\" script modifying the base, tests will pass if you add the script name to here: https://github.com/huggingface/accelerate/blob/main/tests/test_examples.py#L43", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/336", "pr_id": 924092985 }, { "diff": "diff --git a/src/accelerate/memory_utils.py b/src/accelerate/memory_utils.py\nindex 4ffc5d10a..422ead770 100644\n--- a/src/accelerate/memory_utils.py\n+++ b/src/accelerate/memory_utils.py\n@@ -82,5 +82,7 @@ def decorator(*args, **kwargs):\n gc.collect()\n torch.cuda.empty_cache()\n batch_size //= 2\n+ else:\n+ 
raise\n \n return decorator\ndiff --git a/tests/test_memory_utils.py b/tests/test_memory_utils.py\nindex cc1b3e827..42fa151d0 100644\n--- a/tests/test_memory_utils.py\n+++ b/tests/test_memory_utils.py\n@@ -80,3 +80,12 @@ def mock_training_loop_function(batch_size, arg1, arg2):\n mock_training_loop_function(128, \"hello\", \"world\")\n self.assertIn(\"Batch size was passed into `f`\", cm.exception.args[0])\n self.assertIn(\"`f(arg1='hello', arg2='world')\", cm.exception.args[0])\n+\n+ def test_any_other_error(self):\n+ @find_executable_batch_size(starting_batch_size=16)\n+ def mock_training_loop_function(batch_size):\n+ raise ValueError(\"Oops, we had an error!\")\n+\n+ with self.assertRaises(ValueError) as cm:\n+ mock_training_loop_function()\n+ self.assertIn(\"Oops, we had an error!\", cm.exception.args[0])\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/335", "pr_id": 923648946 }, { "diff": "diff --git a/src/accelerate/memory_utils.py b/src/accelerate/memory_utils.py\nindex 73a298caf..4ffc5d10a 100644\n--- a/src/accelerate/memory_utils.py\n+++ b/src/accelerate/memory_utils.py\n@@ -73,6 +73,8 @@ def decorator(*args, **kwargs):\n f\"Remove this as the decorator already does so: `{function.__name__}({arg_str})`\"\n )\n while True:\n+ if batch_size == 0:\n+ raise RuntimeError(\"No executable batch size found, reached zero.\")\n try:\n return function(batch_size, *args, **kwargs)\n except Exception as e:\n@@ -80,7 +82,5 @@ def decorator(*args, **kwargs):\n gc.collect()\n torch.cuda.empty_cache()\n batch_size //= 2\n- else:\n- raise\n \n return decorator\ndiff --git a/tests/test_memory_utils.py b/tests/test_memory_utils.py\nindex 510d79c71..cc1b3e827 100644\n--- a/tests/test_memory_utils.py\n+++ b/tests/test_memory_utils.py\n@@ -50,6 +50,26 @@ def mock_training_loop_function(batch_size, arg1):\n self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])\n self.assertListEqual([bs, arg1], [8, \"hello\"])\n \n+ def test_start_zero(self):\n+ @find_executable_batch_size(starting_batch_size=0)\n+ def mock_training_loop_function(batch_size):\n+ pass\n+\n+ with self.assertRaises(RuntimeError) as cm:\n+ mock_training_loop_function()\n+ self.assertIn(\"No executable batch size found, reached zero.\", cm.exception.args[0])\n+\n+ def test_approach_zero(self):\n+ @find_executable_batch_size(starting_batch_size=16)\n+ def mock_training_loop_function(batch_size):\n+ if batch_size > 0:\n+ raise_fake_out_of_memory()\n+ pass\n+\n+ with self.assertRaises(RuntimeError) as cm:\n+ mock_training_loop_function()\n+ self.assertIn(\"No executable batch size found, reached zero.\", cm.exception.args[0])\n+\n def test_verbose_guard(self):\n @find_executable_batch_size(starting_batch_size=128)\n def mock_training_loop_function(batch_size, arg1, arg2):\n", "code_comments": [ { "body": "```suggestion\r\n def test_start_zero(self):\r\n```", "diff_hunk": "@@ -50,6 +50,26 @@ def mock_training_loop_function(batch_size, arg1):\n self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])\n self.assertListEqual([bs, arg1], [8, \"hello\"])\n \n+ def test_zero(self):", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/334", "pr_id": 922308318 }, { "diff": "diff --git 
a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 69464ea0b..f9d35ea48 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -64,7 +64,7 @@ def get_cluster_input():\n else:\n use_cpu = False\n \n- deepspeed_config = None\n+ deepspeed_config = {}\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n use_deepspeed = _ask_field(\n \"Do you want to use DeepSpeed? [yes/NO]: \",\n@@ -78,7 +78,6 @@ def get_cluster_input():\n is_deepspeed_available()\n ), \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source\"\n \n- deepspeed_config = {}\n if distributed_type == DistributedType.DEEPSPEED:\n deepspeed_config[\"zero_stage\"] = _ask_field(\n \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\n@@ -99,6 +98,7 @@ def get_cluster_input():\n default=1,\n )\n \n+ fsdp_config = {}\n if distributed_type in [DistributedType.MULTI_GPU]:\n use_fsdp = _ask_field(\n \"Do you want to use FullyShardedDataParallel? [yes/NO]: \",\n@@ -108,7 +108,6 @@ def get_cluster_input():\n )\n if use_fsdp:\n distributed_type = DistributedType.FSDP\n- fsdp_config = {}\n if distributed_type == DistributedType.FSDP:\n fsdp_config[\"sharding_strategy\"] = _ask_field(\n \"What should be your sharding strategy ([1] FULL_SHARD, [2] SHARD_GRAD_OP)? [1]: \",\n", "code_comments": [ { "body": "```suggestion\r\n fsdp_config = {}\r\n```\r\nCan you remove the same thing at line 112 as a result?", "diff_hunk": "@@ -99,6 +99,7 @@ def get_cluster_input():\n default=1,\n )\n \n+ fsdp_config = None", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/331", "pr_id": 922240374 }, { "diff": "diff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex ac6df7741..0b8e2250f 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -26,7 +26,7 @@\n import torch\n \n from ..state import AcceleratorState, is_tpu_available\n-from ..utils import gather, is_tensorflow_available\n+from ..utils import gather, is_comet_ml_available, is_tensorflow_available, is_wandb_available\n \n \n def parse_flag_from_env(key, default=False):\n@@ -53,10 +53,51 @@ def slow(test_case):\n Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a\n truthy value to run them.\n \"\"\"\n- if not _run_slow_tests:\n- return unittest.skip(\"test is slow\")(test_case)\n- else:\n- return test_case\n+ return unittest.skipUnless(_run_slow_tests, \"test is slow\")(test_case)\n+\n+\n+def require_cuda(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available.\n+ \"\"\"\n+ return unittest.skipUnless(torch.cuda.is_available(), \"test requires a GPU\")(test_case)\n+\n+\n+def require_tpu(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.\n+ \"\"\"\n+ return unittest.skipUnless(is_tpu_available(), \"test requires TPU\")(test_case)\n+\n+\n+def require_multi_gpu(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires a multi-GPU setup. 
These tests are skipped on a machine without multiple\n+ GPUs.\n+ \"\"\"\n+ return unittest.skipUnless(torch.cuda.device_count() > 1, \"test requires multiple GPUs\")(test_case)\n+\n+\n+def require_tensorflow(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires TensorFlow installed. These tests are skipped when TensorFlow isn't\n+ installed\n+ \"\"\"\n+ return unittest.skipUnless(is_tensorflow_available(), \"test requires TensorFlow\")(test_case)\n+\n+\n+def require_wandb(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires wandb installed. These tests are skipped when wandb isn't installed\n+ \"\"\"\n+ return unittest.skipUnless(is_wandb_available(), \"test requires wandb\")(test_case)\n+\n+\n+def require_comet_ml(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed\n+ \"\"\"\n+ return unittest.skipUnless(is_comet_ml_available(), \"test requires comet_ml\")(test_case)\n \n \n class TempDirTestCase(unittest.TestCase):\n@@ -136,48 +177,6 @@ def are_the_same_tensors(tensor):\n return True\n \n \n-def require_cuda(test_case):\n- \"\"\"\n- Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available.\n- \"\"\"\n- if not torch.cuda.is_available():\n- return unittest.skip(\"test requires a GPU\")(test_case)\n- else:\n- return test_case\n-\n-\n-def require_tpu(test_case):\n- \"\"\"\n- Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.\n- \"\"\"\n- if not is_tpu_available():\n- return unittest.skip(\"test requires TPU\")(test_case)\n- else:\n- return test_case\n-\n-\n-def require_multi_gpu(test_case):\n- \"\"\"\n- Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple\n- GPUs.\n- \"\"\"\n- if torch.cuda.device_count() < 2:\n- return unittest.skip(\"test requires multiple GPUs\")(test_case)\n- else:\n- return test_case\n-\n-\n-def require_tensorflow(test_case):\n- \"\"\"\n- Decorator marking a test that requires TensorFlow installed. 
These tests are skipped when TensorFlow isn't\n- installed\n- \"\"\"\n- if not is_tensorflow_available():\n- return unittest.skip(\"test requires TensorFlow\")(test_case)\n- else:\n- return test_case\n-\n-\n class _RunOutput:\n def __init__(self, returncode, stdout, stderr):\n self.returncode = returncode\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex c5b940a46..e9a2170ee 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -26,10 +26,19 @@\n \n # We use TF to parse the logs\n from accelerate import Accelerator\n-from accelerate.test_utils.testing import MockingTestCase, TempDirTestCase, require_tensorflow\n+from accelerate.test_utils.testing import (\n+ MockingTestCase,\n+ TempDirTestCase,\n+ require_comet_ml,\n+ require_tensorflow,\n+ require_wandb,\n+)\n from accelerate.tracking import CometMLTracker, GeneralTracker\n-from accelerate.utils import is_tensorflow_available\n-from comet_ml import OfflineExperiment\n+from accelerate.utils import is_comet_ml_available, is_tensorflow_available\n+\n+\n+if is_comet_ml_available():\n+ from comet_ml import OfflineExperiment\n \n \n if is_tensorflow_available():\n@@ -110,6 +119,7 @@ def test_logging_dir(self):\n _ = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n \n \n+@require_wandb\n @mock.patch.dict(os.environ, {\"WANDB_MODE\": \"offline\"})\n class WandBTrackingTest(TempDirTestCase, MockingTestCase):\n def setUp(self):\n@@ -179,6 +189,7 @@ def offline_init(self, run_name: str, tmpdir: str):\n logger.info(\"Make sure to log any initial configurations with `self.store_init_configuration` before training!\")\n \n \n+@require_comet_ml\n @mock.patch.object(CometMLTracker, \"__init__\", offline_init)\n class CometMLTest(unittest.TestCase):\n @staticmethod\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/330", "pr_id": 922069538 }, { "diff": "diff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex fadbe6211..58db9b24e 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -139,6 +139,13 @@ class ClusterConfig(BaseConfig):\n # args for fsdp\n fsdp_config: dict = None\n \n+ def __post_init__(self):\n+ if self.deepspeed_config is None:\n+ self.deepspeed_config = {}\n+ if self.fsdp_config is None:\n+ self.fsdp_config = {}\n+ return super().__post_init__()\n+\n \n @dataclass\n class SageMakerConfig(BaseConfig):\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/329", "pr_id": 921986463 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex eb3b24ce9..f62d9e7a4 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -40,6 +40,7 @@\n gather,\n get_pretty_name,\n pad_across_processes,\n+ reduce,\n save,\n wait_for_everyone,\n )\n@@ -556,7 +557,7 @@ def clip_grad_value_(self, parameters, clip_value):\n \n def gather(self, tensor):\n \"\"\"\n- Gather the values in *tensor* accross all processes and concatenate them on the first dimension. Useful to\n+ Gather the values in *tensor* across all processes and concatenate them on the first dimension. 
Useful to\n regroup the predictions from all processes when doing evaluation.\n \n Note:\n@@ -572,6 +573,18 @@ def gather(self, tensor):\n \"\"\"\n return gather(tensor)\n \n+ def reduce(self, tensor: torch.Tensor, reduction=\"sum\"):\n+ \"\"\"\n+ Reduce the values in *tensor* across all processes based on *reduction*.\n+\n+ Args:\n+ tensor (`torch.Tensor`):\n+ The tensors to reduce across all processes.\n+ reduction (`str`, *optional*, defaults to \"sum\"):\n+ A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.\n+ \"\"\"\n+ reduce(tensor, reduction)\n+\n def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):\n \"\"\"\n Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 9bcd87f5f..c2d05c162 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -24,6 +24,7 @@\n \n import numpy as np\n import torch\n+from torch.distributed import ReduceOp\n \n from packaging import version\n \n@@ -621,6 +622,39 @@ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n )\n \n \n+def reduce(tensor, reduction=\"mean\"):\n+ \"\"\"\n+ Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the\n+ mean of a given operation.\n+\n+ Args:\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n+ The data to reduce.\n+ reduction (`str`, *optional*, defaults to `\"mean\"`):\n+ A reduction method. Can be of \"mean\", \"sum\", or \"none\"\n+\n+ Returns:\n+ The same data structure as `data` with all the tensors reduced.\n+ \"\"\"\n+\n+ def _reduce_across_processes(tensor, reduction=\"mean\"):\n+ state = AcceleratorState()\n+ cloned_tensor = tensor.clone()\n+ if state.distributed_type == DistributedType.TPU:\n+ xm.all_reduce(\"sum\", cloned_tensor)\n+ return cloned_tensor\n+ elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n+ torch.distributed.reduce(cloned_tensor, ReduceOp.SUM)\n+ return cloned_tensor\n+ else:\n+ if reduction == \"sum\":\n+ return cloned_tensor.sum()\n+ else:\n+ return cloned_tensor.mean()\n+\n+ return recursively_apply(_reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction)\n+\n+\n def wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n", "code_comments": [ { "body": "```suggestion\r\n reduction (`str`, *optional*, defaults to `\"mean\"`):\r\n```", "diff_hunk": "@@ -621,6 +622,35 @@ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n )\n \n \n+def reduce(tensor: torch.Tensor, reduction=\"mean\"):\n+ \"\"\"\n+ Reduce `tensor` across all processes and perform a `reduction`.\n+\n+ Args:\n+ tensor (`torch.Tensor`):\n+ The data to reduce\n+ reduction (`str`, *optional*, defaults to \"mean\"):", "from_author": false }, { "body": "```suggestion\r\n Reduce a tensor across all processes by the mean of a given operation.\r\n```\r\nThe arguments are described just below, so I like to keep the general description pure English :-)", "diff_hunk": "@@ -621,6 +622,35 @@ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n )\n \n \n+def reduce(tensor: torch.Tensor, reduction=\"mean\"):\n+ \"\"\"\n+ Reduce `tensor` across all processes and perform a `reduction`.", "from_author": false } ], "context": [ { "body": "_The documentation is 
not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/326", "pr_id": 919680762 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 0a6d45892..1ee741653 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -23,4 +23,6 @@\n title: Checkpointing\n - local: tracking\n title: Experiment Tracking\n+ - local: memory\n+ title: Memory Utilities\n title: API Reference\ndiff --git a/docs/source/memory.mdx b/docs/source/memory.mdx\nnew file mode 100644\nindex 000000000..31a2bc66f\n--- /dev/null\n+++ b/docs/source/memory.mdx\n@@ -0,0 +1,51 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Memory Utilities\n+\n+One of the most frustrating errors when it comes to running training scripts is hitting \"CUDA Out-of-Memory\", \n+as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply\n+start their script and let it run.\n+\n+`Accelerate` provides a utility heavily based on [toma](https://github.com/BlackHC/toma) to give this capability.\n+\n+## find_executable_batch_size\n+\n+This algorithm operates with exponential decay, decreasing the batch size in half after each failed run on some \n+training script. To use it, restructure your training function to include an inner function that includes this wrapper, \n+and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code. \n+> Note: The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us\n+\n+```diff\n+def training_function(args):\n+ accelerator = Accelerator()\n+ model = get_model()\n+ model.to(accelerator.device)\n+ optimizer = get_optimizer()\n+\n++ @find_executable_batch_size(starting_batch_size=args.batch_size)\n++ def inner_training_loop(batch_size):\n++ nonlocal model, optimizer # Ensure they can be used in our context\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n+ lr_scheduler = get_scheduler(\n+ optimizer, \n+ num_training_steps=len(train_dataloader)*num_epochs\n+ )\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+ train(model, optimizer, train_dataloader, lr_scheduler)\n+ validate(model, eval_dataloader)\n++ inner_training_loop()\n+```\n+\n+[[autodoc]] memory_utils.find_executable_batch_size\n\\ No newline at end of file\ndiff --git a/examples/by_feature/memory.py b/examples/by_feature/memory.py\nnew file mode 100644\nindex 000000000..67263de52\n--- /dev/null\n+++ b/examples/by_feature/memory.py\n@@ -0,0 +1,218 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+\n+# New Code #\n+from accelerate.memory_utils import find_executable_batch_size\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to ensure out-of-memory errors never\n+# iterrupt training, and builds off the `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ 
train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # New Code #\n+ # We now can define an inner training loop function. 
It should take a batch size as the only parameter,\n+ # and build the dataloaders in there.\n+ # It also gets our decorator\n+ @find_executable_batch_size(starting_batch_size=batch_size)\n+ def inner_training_loop(batch_size):\n+ # And now just move everything below under this function\n+ # Ensure that anything declared outside this function is set as `nonlocal`\n+ # so it is in scope\n+ nonlocal model, optimizer\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+\n+ # New Code #\n+ # And call it at the end with no arguments\n+ # Note: You could also refactor this outside of your training loop function\n+ inner_training_loop()\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/memory_utils.py b/src/accelerate/memory_utils.py\nnew file mode 100644\nindex 000000000..73a298caf\n--- /dev/null\n+++ b/src/accelerate/memory_utils.py\n@@ -0,0 +1,86 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A collection of utilities for ensuring that training can always occur. Heavily influenced by the\n+[toma](https://github.com/BlackHC/toma) library.\n+\"\"\"\n+\n+import functools\n+import gc\n+import inspect\n+\n+import torch\n+\n+\n+def should_reduce_batch_size(exception: Exception) -> bool:\n+ \"\"\"\n+ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory\n+\n+ Args:\n+ exception (`Exception`):\n+ An exception\n+ \"\"\"\n+ _statements = [\n+ \"CUDA out of memory.\", # CUDA OOM\n+ \"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.\", # CUDNN SNAFU\n+ \"DefaultCPUAllocator: can't allocate memory\", # CPU OOM\n+ ]\n+ if isinstance(exception, RuntimeError) and len(exception.args) == 1:\n+ return any(err in exception.args[0] for err in _statements)\n+ return False\n+\n+\n+def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):\n+ \"\"\"\n+ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or\n+ CUDNN, the batch size is cut in half and passed to `function`\n+\n+ `function` must take in a `batch_size` parameter as its first argument.\n+\n+ Args:\n+ function (`callable`, *optional*):\n+ A function to wrap\n+ starting_batch_size (`int`, *optional*):\n+ The batch size to try and fit into memory\n+ \"\"\"\n+ if function is None:\n+ return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)\n+\n+ batch_size = starting_batch_size\n+\n+ def decorator(*args, **kwargs):\n+ nonlocal batch_size\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ params = list(inspect.signature(function).parameters.keys())\n+ # Guard against user error\n+ if len(params) < (len(args) + 1):\n+ arg_str = \", \".join([f\"{arg}={value}\" for arg, value in zip(params[1:], args[1:])])\n+ raise TypeError(\n+ f\"Batch size was passed into `{function.__name__}` as the first argument when called.\"\n+ f\"Remove this as the decorator already does so: `{function.__name__}({arg_str})`\"\n+ )\n+ while True:\n+ try:\n+ return function(batch_size, *args, **kwargs)\n+ except Exception as e:\n+ if should_reduce_batch_size(e):\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ batch_size //= 2\n+ else:\n+ raise\n+\n+ return decorator\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex aa78fed9f..919667fc5 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -40,7 +40,7 @@\n # Should mock `{script_name}.get_dataloaders` via:\n # @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n \n-EXCLUDE_EXAMPLES = [\"cross_validation.py\", \"multi_process_metrics.py\"]\n+EXCLUDE_EXAMPLES = [\"cross_validation.py\", \"multi_process_metrics.py\", \"memory.py\"]\n \n \n def mocked_dataloaders(accelerator, batch_size: int = 16):\ndiff --git a/tests/test_memory_utils.py b/tests/test_memory_utils.py\nnew file mode 100644\nindex 000000000..510d79c71\n--- /dev/null\n+++ 
b/tests/test_memory_utils.py\n@@ -0,0 +1,62 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+\n+from accelerate.memory_utils import find_executable_batch_size\n+\n+\n+def raise_fake_out_of_memory():\n+ raise RuntimeError(\"CUDA out of memory.\")\n+\n+\n+class MemoryTest(unittest.TestCase):\n+ def test_memory_implicit(self):\n+ batch_sizes = []\n+\n+ @find_executable_batch_size(starting_batch_size=128)\n+ def mock_training_loop_function(batch_size):\n+ nonlocal batch_sizes\n+ batch_sizes.append(batch_size)\n+ if batch_size != 8:\n+ raise_fake_out_of_memory()\n+\n+ mock_training_loop_function()\n+ self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])\n+\n+ def test_memory_explicit(self):\n+ batch_sizes = []\n+\n+ @find_executable_batch_size(starting_batch_size=128)\n+ def mock_training_loop_function(batch_size, arg1):\n+ nonlocal batch_sizes\n+ batch_sizes.append(batch_size)\n+ if batch_size != 8:\n+ raise_fake_out_of_memory()\n+ return batch_size, arg1\n+\n+ bs, arg1 = mock_training_loop_function(\"hello\")\n+ self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])\n+ self.assertListEqual([bs, arg1], [8, \"hello\"])\n+\n+ def test_verbose_guard(self):\n+ @find_executable_batch_size(starting_batch_size=128)\n+ def mock_training_loop_function(batch_size, arg1, arg2):\n+ if batch_size != 8:\n+ raise raise_fake_out_of_memory()\n+\n+ with self.assertRaises(TypeError) as cm:\n+ mock_training_loop_function(128, \"hello\", \"world\")\n+ self.assertIn(\"Batch size was passed into `f`\", cm.exception.args[0])\n+ self.assertIn(\"`f(arg1='hello', arg2='world')\", cm.exception.args[0])\n", "code_comments": [ { "body": "We might hit some custom types of users here. The only thing we need to check is that there is a `batch_size` attribute and that it's an iterator, no?", "diff_hunk": "@@ -0,0 +1,82 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A collection of utilities for ensuring that training can always occur. 
Heavily influenced by the\n+[toma](https://github.com/BlackHC/toma) library.\n+\"\"\"\n+\n+import functools\n+from typing import List, Union\n+\n+from torch.utils import DataLoader\n+\n+from accelerate.data_loader import DataLoaderDispatcher, DataLoaderShard\n+\n+\n+def should_reduce_batch_size(exception: Exception) -> bool:\n+ \"\"\"\n+ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory\n+\n+ Args:\n+ exception (`Exception`):\n+ An exception\n+ \"\"\"\n+ _statements = [\n+ \"CUDA out of memory.\", # CUDA OOM\n+ \"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.\", # CUDNN SNAFU\n+ \"DefaultCPUAllocator: can't allocate memory\", # CPU OOM\n+ ]\n+ if isinstance(exception, RuntimeError) and len(exception.args) == 1:\n+ return any(err in exception.args[0] for err in _statements)\n+ return False\n+\n+\n+def memory_aware(\n+ function: callable = None, dataloaders: List[Union[DataLoader, DataLoaderDispatcher, DataLoaderShard]] = None\n+):\n+ \"\"\"\n+ A decorator that will reduce the batch size in half of all `dataloaders` if the wrapped function fails from any\n+ exceptions related to out-of-memory or CUDNN not supported until the function executes completely.\n+\n+ Args:\n+ function (`callable`, *optional*):\n+ A function to wrap that utilizes all declared `dataloaders`\n+ dataloaders (list of `DataLoader`, [`~data_loader.DataLoaderDispatcher`], or [`~data_loader.DataLoaderShard`], *optional*):\n+ A list of prepared or unprepared `DataLoaders`.\n+ \"\"\"\n+ if not isinstance(dataloaders, (tuple, list)):\n+ dataloaders = [dataloaders]\n+ if not all(\n+ (isinstance(dataloader, (DataLoaderDispatcher, DataLoaderShard, DataLoader)) for dataloader in dataloaders)\n+ ):\n+ raise TypeError(\n+ \"Unsupported operation attempted. One or more dataloaders passed were not of type(s) `DataLoaderDispatcher`, `DataLoaderShard`, or `torch.utils.DataLoader`\"\n+ )", "from_author": false }, { "body": "Does this actually work? PyTorch fixes the `batch_sampler` at init, so I think this is ignore by the `dataloader`.", "diff_hunk": "@@ -0,0 +1,82 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A collection of utilities for ensuring that training can always occur. 
Heavily influenced by the\n+[toma](https://github.com/BlackHC/toma) library.\n+\"\"\"\n+\n+import functools\n+from typing import List, Union\n+\n+from torch.utils import DataLoader\n+\n+from accelerate.data_loader import DataLoaderDispatcher, DataLoaderShard\n+\n+\n+def should_reduce_batch_size(exception: Exception) -> bool:\n+ \"\"\"\n+ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory\n+\n+ Args:\n+ exception (`Exception`):\n+ An exception\n+ \"\"\"\n+ _statements = [\n+ \"CUDA out of memory.\", # CUDA OOM\n+ \"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.\", # CUDNN SNAFU\n+ \"DefaultCPUAllocator: can't allocate memory\", # CPU OOM\n+ ]\n+ if isinstance(exception, RuntimeError) and len(exception.args) == 1:\n+ return any(err in exception.args[0] for err in _statements)\n+ return False\n+\n+\n+def memory_aware(\n+ function: callable = None, dataloaders: List[Union[DataLoader, DataLoaderDispatcher, DataLoaderShard]] = None\n+):\n+ \"\"\"\n+ A decorator that will reduce the batch size in half of all `dataloaders` if the wrapped function fails from any\n+ exceptions related to out-of-memory or CUDNN not supported until the function executes completely.\n+\n+ Args:\n+ function (`callable`, *optional*):\n+ A function to wrap that utilizes all declared `dataloaders`\n+ dataloaders (list of `DataLoader`, [`~data_loader.DataLoaderDispatcher`], or [`~data_loader.DataLoaderShard`], *optional*):\n+ A list of prepared or unprepared `DataLoaders`.\n+ \"\"\"\n+ if not isinstance(dataloaders, (tuple, list)):\n+ dataloaders = [dataloaders]\n+ if not all(\n+ (isinstance(dataloader, (DataLoaderDispatcher, DataLoaderShard, DataLoader)) for dataloader in dataloaders)\n+ ):\n+ raise TypeError(\n+ \"Unsupported operation attempted. One or more dataloaders passed were not of type(s) `DataLoaderDispatcher`, `DataLoaderShard`, or `torch.utils.DataLoader`\"\n+ )\n+\n+ if function is None:\n+ return functools.partial(memory_aware, dataloaders=dataloaders)\n+\n+ def decorator(*args, **kwargs):\n+ while True:\n+ try:\n+ return function(*args, **kwargs)\n+ except Exception as e:\n+ if should_reduce_batch_size(e):\n+ for dataloader in dataloaders:\n+ dataloader.batch_size /= 2", "from_author": false }, { "body": "Still not fond of that name ;-) What about `@find_max_batch_size`?", "diff_hunk": "@@ -0,0 +1,218 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+\n+# New Code #\n+from accelerate.memory_utils import memory_aware\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to ensure out-of-memory errors never\n+# iterrupt training, and builds off the `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = 
DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # New Code #\n+ # We now can define an inner training loop function. It should take a batch size as the only parameter,\n+ # and build the dataloaders in there.\n+ # It also gets our decorator\n+ @memory_aware(starting_batch_size=batch_size)", "from_author": false }, { "body": "It's cleaner to pass the string on several lines (with the `\"\"\"` you will get the new lines and indentation in the text displayed by the warning).\r\n```suggestion\r\n f\"Batch size was passed into `{function.__name__}` as the first argument when called.\"\r\n f\"Remove this as the decorator already does so: `{function.__name__}({arg_str})`\"\r\n```", "diff_hunk": "@@ -0,0 +1,87 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"\n+A collection of utilities for ensuring that training can always occur. 
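Backing up to the example training function quoted above: it trades an oversized batch size for gradient accumulation steps. Below is a tiny standalone sketch of that arithmetic; the function name `split_batch` and the printed values are illustrative, while `MAX_GPU_BATCH_SIZE = 16` is taken from the script.

```python
MAX_GPU_BATCH_SIZE = 16  # same cap as in the example script


def split_batch(batch_size: int):
    """Return the per-step batch size and the number of accumulation steps.

    The effective batch size (their product) equals the requested size whenever
    the request is a multiple of the cap.
    """
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    return batch_size, gradient_accumulation_steps


print(split_batch(64))  # (16, 4): four accumulated steps of 16 samples each
print(split_batch(8))   # (8, 1): small enough to run directly
```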
Heavily influenced by the\n+[toma](https://github.com/BlackHC/toma) library.\n+\"\"\"\n+\n+import functools\n+import gc\n+import inspect\n+\n+import torch\n+\n+\n+def should_reduce_batch_size(exception: Exception) -> bool:\n+ \"\"\"\n+ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory\n+\n+ Args:\n+ exception (`Exception`):\n+ An exception\n+ \"\"\"\n+ _statements = [\n+ \"CUDA out of memory.\", # CUDA OOM\n+ \"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.\", # CUDNN SNAFU\n+ \"DefaultCPUAllocator: can't allocate memory\", # CPU OOM\n+ ]\n+ if isinstance(exception, RuntimeError) and len(exception.args) == 1:\n+ return any(err in exception.args[0] for err in _statements)\n+ return False\n+\n+\n+def memory_aware(function: callable = None, starting_batch_size: int = 128):\n+ \"\"\"\n+ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or\n+ CUDNN, the batch size is cut in half and passed to `function`\n+\n+ `function` must take in a `batch_size` parameter as its first argument.\n+\n+ Args:\n+ function (`callable`, *optional*):\n+ A function to wrap\n+ starting_batch_size (`int`, *optional*):\n+ The batch size to try and fit into memory\n+ \"\"\"\n+ if function is None:\n+ return functools.partial(memory_aware, starting_batch_size=starting_batch_size)\n+\n+ batch_size = starting_batch_size\n+\n+ def decorator(*args, **kwargs):\n+ nonlocal batch_size\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+ params = list(inspect.signature(function).parameters.keys())\n+ # Guard against user error\n+ if len(params) < (len(args) + 1):\n+ arg_str = \", \".join([f\"{arg}={value}\" for arg, value in zip(params[1:], args[1:])])\n+ raise TypeError(\n+ f\"\"\"Batch size was passed into `{function.__name__}` as the first argument when called.\n+ Remove this as the decorator already does so:\n+ `{function.__name__}({arg_str})`\"\"\"", "from_author": false }, { "body": "Use something better than `f` has a function name please ;-)", "diff_hunk": "@@ -0,0 +1,62 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+\n+from accelerate.memory_utils import memory_aware\n+\n+\n+def raise_fake_out_of_memory():\n+ raise RuntimeError(\"CUDA out of memory.\")\n+\n+\n+class MemoryTest(unittest.TestCase):\n+ def test_memory_implicit(self):\n+ batch_sizes = []\n+\n+ @memory_aware(starting_batch_size=128)\n+ def f(batch_size):\n+ nonlocal batch_sizes\n+ batch_sizes.append(batch_size)\n+ if batch_size != 8:\n+ raise_fake_out_of_memory()\n+\n+ f()", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -0,0 +1,62 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+\n+from accelerate.memory_utils import memory_aware\n+\n+\n+def raise_fake_out_of_memory():\n+ raise RuntimeError(\"CUDA out of memory.\")\n+\n+\n+class MemoryTest(unittest.TestCase):\n+ def test_memory_implicit(self):\n+ batch_sizes = []\n+\n+ @memory_aware(starting_batch_size=128)\n+ def f(batch_size):\n+ nonlocal batch_sizes\n+ batch_sizes.append(batch_size)\n+ if batch_size != 8:\n+ raise_fake_out_of_memory()\n+\n+ f()\n+ self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])\n+\n+ def test_memory_explicit(self):\n+ batch_sizes = []\n+\n+ @memory_aware(starting_batch_size=128)\n+ def f(batch_size, arg1):", "from_author": false }, { "body": "Same there!", "diff_hunk": "@@ -0,0 +1,62 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+\n+from accelerate.memory_utils import memory_aware\n+\n+\n+def raise_fake_out_of_memory():\n+ raise RuntimeError(\"CUDA out of memory.\")\n+\n+\n+class MemoryTest(unittest.TestCase):\n+ def test_memory_implicit(self):\n+ batch_sizes = []\n+\n+ @memory_aware(starting_batch_size=128)\n+ def f(batch_size):\n+ nonlocal batch_sizes\n+ batch_sizes.append(batch_size)\n+ if batch_size != 8:\n+ raise_fake_out_of_memory()\n+\n+ f()\n+ self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])\n+\n+ def test_memory_explicit(self):\n+ batch_sizes = []\n+\n+ @memory_aware(starting_batch_size=128)\n+ def f(batch_size, arg1):\n+ nonlocal batch_sizes\n+ batch_sizes.append(batch_size)\n+ if batch_size != 8:\n+ raise_fake_out_of_memory()\n+ return batch_size, arg1\n+\n+ bs, arg1 = f(\"hello\")\n+ self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])\n+ self.assertListEqual([8, \"hello\"], [bs, arg1])\n+\n+ def test_verbose_guard(self):\n+ @memory_aware(starting_batch_size=128)\n+ def f(batch_size, arg1, arg2):", "from_author": false }, { "body": "`find_executable_batch_size` is a bit more on the nose here with what it does. WDYT?", "diff_hunk": "@@ -0,0 +1,218 @@\n+# Copyright 2022 The HuggingFace Team. 
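A simplified, self-contained sketch of the decorator pattern being reviewed in this thread. This is not the accelerate implementation; the name `retry_with_smaller_batch` and the condensed OOM check are assumptions for illustration. The wrapped function receives the batch size as its first argument and is retried with half the batch size whenever an out-of-memory error is raised, which produces exactly the `[128, 64, 32, 16, 8]` sequence the tests above assert.

```python
import functools


def retry_with_smaller_batch(starting_batch_size: int = 128):
    """Condensed, illustrative version of the retry-and-halve control flow."""

    def wrapper(function):
        @functools.wraps(function)
        def inner(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    # The wrapped function gets the batch size as its first argument.
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "CUDA out of memory." in str(e):
                        batch_size //= 2  # halve and try again
                    else:
                        raise

        return inner

    return wrapper


@retry_with_smaller_batch(starting_batch_size=128)
def train(batch_size):
    # Pretend anything above 8 samples does not fit on the device.
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size


print(train())  # tries 128, 64, 32, 16, then succeeds with 8
```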
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+\n+# New Code #\n+from accelerate.memory_utils import memory_aware\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to ensure out-of-memory errors never\n+# iterrupt training, and builds off the `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = 
DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # New Code #\n+ # We now can define an inner training loop function. It should take a batch size as the only parameter,\n+ # and build the dataloaders in there.\n+ # It also gets our decorator\n+ @memory_aware(starting_batch_size=batch_size)", "from_author": true }, { "body": "Mostly since it's a start high -> go low approach if it broke, rather than find the maximum", "diff_hunk": "@@ -0,0 +1,218 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+\n+# New Code #\n+from accelerate.memory_utils import memory_aware\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to ensure out-of-memory errors never\n+# iterrupt training, and builds off the `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = 
DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # New Code #\n+ # We now can define an inner training loop function. It should take a batch size as the only parameter,\n+ # and build the dataloaders in there.\n+ # It also gets our decorator\n+ @memory_aware(starting_batch_size=batch_size)", "from_author": true }, { "body": "You do end up with the max that fits on the GPU, but your name works for me too.", "diff_hunk": "@@ -0,0 +1,218 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+\n+# New Code #\n+from accelerate.memory_utils import memory_aware\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to ensure out-of-memory errors never\n+# iterrupt training, and builds off the `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = 
DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # New Code #\n+ # We now can define an inner training loop function. It should take a batch size as the only parameter,\n+ # and build the dataloaders in there.\n+ # It also gets our decorator\n+ @memory_aware(starting_batch_size=batch_size)", "from_author": false }, { "body": "Put the API doc at the end.", "diff_hunk": "@@ -0,0 +1,51 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Memory Utilities\n+\n+One of the most frustrating errors when it comes to running training scripts is hitting \"CUDA Out-of-Memory\", \n+as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply\n+start their script and let it run.\n+\n+`Accelerate` provides a utility heavily based on [toma](https://github.com/BlackHC/toma) to give this capability.\n+\n+## find_executable_batch_size\n+\n+[[autodoc]] memory_utils.find_executable_batch_size", "from_author": false }, { "body": "Wrong name here :-)", "diff_hunk": "@@ -0,0 +1,51 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Memory Utilities\n+\n+One of the most frustrating errors when it comes to running training scripts is hitting \"CUDA Out-of-Memory\", \n+as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply\n+start their script and let it run.\n+\n+`Accelerate` provides a utility heavily based on [toma](https://github.com/BlackHC/toma) to give this capability.\n+\n+## find_executable_batch_size\n+\n+[[autodoc]] memory_utils.find_executable_batch_size\n+\n+This algorithm operates with exponential decay, decreasing the batch size in half after each failed run on some \n+training script. To use it, restructure your training function to include an inner function that includes this wrapper, \n+and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code. \n+> Note: The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us\n+\n+```diff\n+def training_function(args):\n+ accelerator = Accelerator()\n+ model = get_model()\n+ model.to(accelerator.device)\n+ optimizer = get_optimizer()\n+\n++ @memory_aware(starting_batch_size=args.batch_size)", "from_author": false } ], "context": [ { "body": "Still TODO:\r\n\r\n- Add documentation\r\n- Write tests", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/324", "pr_id": 916716433 }, { "diff": "diff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex e595a8db7..ee0716b6d 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -166,6 +166,8 @@ def training_function(config, args):\n # New Code #\n # We need to keep track of how many total steps we have iterated over\n overall_step = 0\n+ # We also need to keep track of the stating epoch so files are named properly\n+ starting_epoch = 0\n \n # We need to load the checkpoint back in before training here with `load_state`\n # The total number of epochs is adjusted based on where the state is being loaded from,\n@@ -184,23 +186,23 @@ def training_function(config, args):\n training_difference = os.path.splitext(path)[0]\n \n if \"epoch\" in training_difference:\n- num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n+ starting_epoch = int(training_difference.replace(\"epoch_\", \"\")) + 1\n resume_step = None\n else:\n resume_step = int(training_difference.replace(\"step_\", \"\"))\n- num_epochs -= resume_step // len(train_dataloader)\n- # If resuming by step, we also need to know exactly how far into the DataLoader we went\n- resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+ starting_epoch = resume_step // len(train_dataloader)\n+ resume_step -= starting_epoch * len(train_dataloader)\n \n # Now we train the model\n- for epoch in range(num_epochs):\n+ for epoch in range(starting_epoch, num_epochs):\n model.train()\n for step, batch in enumerate(train_dataloader):\n # New Code #\n # We 
need to skip steps until we reach the resumed step during the first epoch\n- if args.resume_from_checkpoint and epoch == 0:\n+ if args.resume_from_checkpoint and epoch == starting_epoch:\n if resume_step is not None and step < resume_step:\n- pass\n+ overall_step += 1\n+ continue\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n outputs = model(**batch)\n@@ -246,7 +248,7 @@ def training_function(config, args):\n \n # New Code #\n # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state`\n- # These are saved to folders named `step_{overall_step}`\n+ # These are saved to folders named `epoch_{epoch}`\n # Will contain files: \"pytorch_model.bin\", \"optimizer.bin\", \"scheduler.bin\", and \"random_states.pkl\"\n # If mixed precision was used, will also save a \"scalar.bin\" file\n if checkpointing_steps == \"epoch\":\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex 01033498c..80a083a8d 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -173,6 +173,10 @@ def training_function(config, args):\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n+ # We need to keep track of how many total steps we have iterated over\n+ overall_step = 0\n+ # We also need to keep track of the stating epoch so files are named properly\n+ starting_epoch = 0\n \n # Potentially load in the weights and states from a previous save\n if args.resume_from_checkpoint:\n@@ -189,25 +193,24 @@ def training_function(config, args):\n training_difference = os.path.splitext(path)[0]\n \n if \"epoch\" in training_difference:\n- num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n+ starting_epoch = int(training_difference.replace(\"epoch_\", \"\")) + 1\n resume_step = None\n else:\n resume_step = int(training_difference.replace(\"step_\", \"\"))\n- num_epochs -= resume_step // len(train_dataloader)\n- # If resuming by step, we also need to know exactly how far into the DataLoader we went\n- resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+ starting_epoch = resume_step // len(train_dataloader)\n+ resume_step -= starting_epoch * len(train_dataloader)\n \n- overall_step = 0\n # Now we train the model\n- for epoch in range(num_epochs):\n+ for epoch in range(starting_epoch, num_epochs):\n model.train()\n if args.with_tracking:\n total_loss = 0\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n- if args.resume_from_checkpoint and epoch == 0:\n+ if args.resume_from_checkpoint and epoch == starting_epoch:\n if resume_step is not None and step < resume_step:\n- pass\n+ overall_step += 1\n+ continue\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n inputs = (batch[\"image\"] - mean) / std\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex a0e8d568c..1910a4f83 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -153,7 +153,10 @@ def collate_fn(examples):\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n \n+ # We need to keep track of how many total steps we have iterated over\n overall_step = 0\n+ # We also need to keep track of the stating epoch so files are named 
properly\n+ starting_epoch = 0\n \n # Potentially load in the weights and states from a previous save\n if args.resume_from_checkpoint:\n@@ -170,24 +173,24 @@ def collate_fn(examples):\n training_difference = os.path.splitext(path)[0]\n \n if \"epoch\" in training_difference:\n- num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n+ starting_epoch = int(training_difference.replace(\"epoch_\", \"\")) + 1\n resume_step = None\n else:\n resume_step = int(training_difference.replace(\"step_\", \"\"))\n- num_epochs -= resume_step // len(train_dataloader)\n- # If resuming by step, we also need to know exactly how far into the DataLoader we went\n- resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+ starting_epoch = resume_step // len(train_dataloader)\n+ resume_step -= starting_epoch * len(train_dataloader)\n \n # Now we train the model\n- for epoch in range(num_epochs):\n+ for epoch in range(starting_epoch, num_epochs):\n model.train()\n if args.with_tracking:\n total_loss = 0\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n- if args.resume_from_checkpoint and epoch == 0:\n+ if args.resume_from_checkpoint and epoch == starting_epoch:\n if resume_step is not None and step < resume_step:\n- pass\n+ overall_step += 1\n+ continue\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n outputs = model(**batch)\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex e348e161f..ac6df7741 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -69,6 +69,8 @@ class TempDirTestCase(unittest.TestCase):\n The temporary directory location will be stored in `self.tmpdir`\n \"\"\"\n \n+ clear_on_setup = True\n+\n @classmethod\n def setUpClass(cls):\n \"Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`\"\n@@ -82,11 +84,12 @@ def tearDownClass(cls):\n \n def setUp(self):\n \"Destroy all contents in `self.tmpdir`, but not `self.tmpdir`\"\n- for path in Path(self.tmpdir).glob(\"**/*\"):\n- if path.is_file():\n- path.unlink()\n- elif path.is_dir():\n- shutil.rmtree(path)\n+ if self.clear_on_setup:\n+ for path in Path(self.tmpdir).glob(\"**/*\"):\n+ if path.is_file():\n+ path.unlink()\n+ elif path.is_dir():\n+ shutil.rmtree(path)\n \n \n class MockingTestCase(unittest.TestCase):\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 3ecd3bf91..aa78fed9f 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -22,7 +22,7 @@\n \n from accelerate import DistributedType\n from accelerate.test_utils.examples import compare_against_test\n-from accelerate.test_utils.testing import slow\n+from accelerate.test_utils.testing import TempDirTestCase, slow\n from datasets import load_dataset\n from transformers import AutoTokenizer\n \n@@ -158,30 +158,61 @@ def test_cv_examples(self):\n self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n \n \n-class FeatureExamplesTests(unittest.TestCase):\n+class FeatureExamplesTests(TempDirTestCase):\n+ clear_on_setup = False\n+\n @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n def test_checkpointing_by_epoch(self):\n- with tempfile.TemporaryDirectory() as tmpdir:\n- testargs = f\"\"\"\n- checkpointing.py\n- --checkpointing_steps epoch\n- --output_dir {tmpdir}\n- \"\"\".split()\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps epoch\n+ 
--output_dir {self.tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_1\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_checkpointing_by_steps(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps 2\n+ --output_dir {self.tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_load_states_by_epoch(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"epoch_1\")}\n+ \"\"\".split()\n+ dummy_results = {\"accuracy\": mock.ANY, \"f1\": mock.ANY}\n+ with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n with mock.patch.object(sys, \"argv\", testargs):\n checkpointing.main()\n- self.assertTrue(os.path.exists(os.path.join(tmpdir, \"epoch_0\")))\n+ with self.assertRaises(AssertionError):\n+ mocked_print.assert_any_call(\"epoch 0:\", dummy_results)\n+ with self.assertRaises(AssertionError):\n+ mocked_print.assert_any_call(\"epoch 1:\", dummy_results)\n+ mocked_print.assert_any_call(\"epoch 2:\", dummy_results)\n \n @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n- def test_checkpointing_by_steps(self):\n- with tempfile.TemporaryDirectory() as tmpdir:\n- testargs = f\"\"\"\n- checkpointing.py\n- --checkpointing_steps 2\n- --output_dir {tmpdir}\n- \"\"\".split()\n+ def test_load_states_by_steps(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"step_4\")}\n+ \"\"\".split()\n+ dummy_results = {\"accuracy\": mock.ANY, \"f1\": mock.ANY}\n+ with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n with mock.patch.object(sys, \"argv\", testargs):\n checkpointing.main()\n- self.assertTrue(os.path.exists(os.path.join(tmpdir, \"step_2\")))\n+ with self.assertRaises(AssertionError):\n+ mocked_print.assert_any_call(\"epoch 0:\", dummy_results)\n+ mocked_print.assert_any_call(\"epoch 1:\", dummy_results)\n+ mocked_print.assert_any_call(\"epoch 2:\", dummy_results)\n \n @slow\n def test_cross_validation(self):\n", "code_comments": [ { "body": "How are we sure we will get those values exactly?", "diff_hunk": "@@ -158,30 +158,59 @@ def test_cv_examples(self):\n self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n \n \n-class FeatureExamplesTests(unittest.TestCase):\n+class FeatureExamplesTests(TempDirTestCase):\n+ clear_on_setup = False\n+\n @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n def test_checkpointing_by_epoch(self):\n- with tempfile.TemporaryDirectory() as tmpdir:\n- testargs = f\"\"\"\n- checkpointing.py\n- --checkpointing_steps epoch\n- --output_dir {tmpdir}\n- \"\"\".split()\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps epoch\n+ --output_dir {self.tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_1\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_checkpointing_by_steps(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps 2\n+ --output_dir {self.tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", 
testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_load_states_by_epoch(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"epoch_1\")}\n+ \"\"\".split()\n+ with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n with mock.patch.object(sys, \"argv\", testargs):\n checkpointing.main()\n- self.assertTrue(os.path.exists(os.path.join(tmpdir, \"epoch_0\")))\n+ with self.assertRaises(AssertionError):\n+ mocked_print.assert_any_call(\"epoch 0:\", {\"accuracy\": 0.5, \"f1\": 0.0})", "from_author": false }, { "body": "This stems from the scheduler we use in the examples, it makes it impossible for the model to train quickly so we always get an accuracy of .5 and an f1 of 0 for all of our epochs. Hence why none of these example tests check for if we get \"good\" accuracy, it's for independent behavior.\r\n\r\n*But* I dug deeper and found `mock.ANY`. In this case we only care about matching the `epoch *` text, not the results. So instead we have something like this:\r\n\r\n```python\r\ndummy_results = {\"accuracy\":mock.ANY, \"f1\":mock.ANY}\r\nwith self.assertRaises(AssertionError):\r\n mocked_print.assert_any_call(\"epoch 0:\", dummy_results)\r\n```\r\n\r\nWhich helps me sleep much better at night", "diff_hunk": "@@ -158,30 +158,59 @@ def test_cv_examples(self):\n self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n \n \n-class FeatureExamplesTests(unittest.TestCase):\n+class FeatureExamplesTests(TempDirTestCase):\n+ clear_on_setup = False\n+\n @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n def test_checkpointing_by_epoch(self):\n- with tempfile.TemporaryDirectory() as tmpdir:\n- testargs = f\"\"\"\n- checkpointing.py\n- --checkpointing_steps epoch\n- --output_dir {tmpdir}\n- \"\"\".split()\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps epoch\n+ --output_dir {self.tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_1\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_checkpointing_by_steps(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps 2\n+ --output_dir {self.tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_load_states_by_epoch(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"epoch_1\")}\n+ \"\"\".split()\n+ with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n with mock.patch.object(sys, \"argv\", testargs):\n checkpointing.main()\n- self.assertTrue(os.path.exists(os.path.join(tmpdir, \"epoch_0\")))\n+ with self.assertRaises(AssertionError):\n+ mocked_print.assert_any_call(\"epoch 0:\", {\"accuracy\": 0.5, \"f1\": 0.0})", "from_author": true }, { "body": "You never know if your model might have learned a tiny something, so yes, much better :-)", "diff_hunk": "@@ -158,30 +158,59 @@ def test_cv_examples(self):\n self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n \n \n-class 
FeatureExamplesTests(unittest.TestCase):\n+class FeatureExamplesTests(TempDirTestCase):\n+ clear_on_setup = False\n+\n @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n def test_checkpointing_by_epoch(self):\n- with tempfile.TemporaryDirectory() as tmpdir:\n- testargs = f\"\"\"\n- checkpointing.py\n- --checkpointing_steps epoch\n- --output_dir {tmpdir}\n- \"\"\".split()\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps epoch\n+ --output_dir {self.tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"epoch_1\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_checkpointing_by_steps(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps 2\n+ --output_dir {self.tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(self.tmpdir, \"step_4\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_load_states_by_epoch(self):\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --resume_from_checkpoint {os.path.join(self.tmpdir, \"epoch_1\")}\n+ \"\"\".split()\n+ with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n with mock.patch.object(sys, \"argv\", testargs):\n checkpointing.main()\n- self.assertTrue(os.path.exists(os.path.join(tmpdir, \"epoch_0\")))\n+ with self.assertRaises(AssertionError):\n+ mocked_print.assert_any_call(\"epoch 0:\", {\"accuracy\": 0.5, \"f1\": 0.0})", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@sgugger 50/50 on whether to consider these \"slow\" tests or not. They add ~1.5 min to the CI", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/323", "pr_id": 915537956 }, { "diff": "diff --git a/.gitignore b/.gitignore\nindex f41377320..7f145cd5c 100644\n--- a/.gitignore\n+++ b/.gitignore\n@@ -132,4 +132,7 @@ dmypy.json\n .vscode\n \n # IntelliJ\n-.idea\n\\ No newline at end of file\n+.idea\n+\n+# Mac .DS_Store\n+.DS_Store\n\\ No newline at end of file\ndiff --git a/CONTRIBUTING.md b/CONTRIBUTING.md\nindex 3073b3381..fcc7d9b1c 100644\n--- a/CONTRIBUTING.md\n+++ b/CONTRIBUTING.md\n@@ -54,7 +54,7 @@ Did not find it? 
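Stepping back to the checkpoint-resume logic introduced in the checkpointing example above, here is a small standalone sketch of the folder-name arithmetic; the helper `resume_point` and the sample paths are hypothetical. An `epoch_{n}` folder resumes training at epoch `n + 1`, while a `step_{n}` folder is converted into a starting epoch plus an in-epoch number of batches to skip.

```python
import os


def resume_point(checkpoint_path: str, steps_per_epoch: int):
    """Illustrative mirror of the resume arithmetic used in the example scripts."""
    name = os.path.splitext(os.path.basename(checkpoint_path))[0]
    if "epoch" in name:
        starting_epoch = int(name.replace("epoch_", "")) + 1
        return starting_epoch, None           # nothing to skip inside the epoch
    resume_step = int(name.replace("step_", ""))
    starting_epoch = resume_step // steps_per_epoch
    resume_step -= starting_epoch * steps_per_epoch
    return starting_epoch, resume_step        # skip this many batches first


print(resume_point("output/epoch_1", steps_per_epoch=4))  # (2, None)
print(resume_point("output/step_4", steps_per_epoch=4))   # (1, 0)
```

These two sample values line up with the tests above: resuming from `epoch_1` only runs epoch 2, while resuming from `step_4` (with a small mocked dataloader) re-runs epochs 1 and 2 but never epoch 0.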
:( So we can act quickly on it, please follow these steps:\n * Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n * A short, self-contained, code snippet that allows us to reproduce the bug in\n less than 30s;\n-* Provide the with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_congig.yml`)\n+* Provide the with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_config.yaml`)\n \n ### Do you want a new feature?\n \ndiff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 1ee741653..d35024d66 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -23,6 +23,8 @@\n title: Checkpointing\n - local: tracking\n title: Experiment Tracking\n+ - local: fsdp\n+ title: Fully Sharded Data Parallel\n - local: memory\n title: Memory Utilities\n title: API Reference\ndiff --git a/docs/source/fsdp.mdx b/docs/source/fsdp.mdx\nnew file mode 100644\nindex 000000000..e2a0210c8\n--- /dev/null\n+++ b/docs/source/fsdp.mdx\n@@ -0,0 +1,120 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Fully Sharded Data Parallel\n+\n+To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.\n+This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.\n+To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).\n+We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature.\n+All you need to do is enable it through the config.\n+\n+## How it works out the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. 
This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example (from the root of the repo) with FSDP enabled:\n+\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: FSDP\n+fsdp_config:\n+ min_num_params: 2000\n+ offload_params: false\n+ sharding_strategy: 1\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/nlp_example.py\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`Sharding Strategy`: [1] FULL_SHARD, [2] SHARD_GRAD_OP\n+`Min Num Params`: FSDP\\'s minimum number of parameters for Default Auto Wrapping.\n+`Offload Params`: Decides Whether to offload parameters and gradients to CPU.\n+```\n+\n+## Few caveats to be aware of\n+\n+- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.\n+ Due to this, any optimizer created before model wrapping gets broken and occupies more memory.\n+ Hence, it is highly recommended and efficient to prepare model before creating optimizer.\n+ `Accelerate` will automatically wrap the model and create an optimizer for you in case of single model with a warning message.\n+ > FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before creating the optimizer\n+\n+However, below is the recommended way to prepare model and optimizer while using FSDP:\n+\n+```diff\n+model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n++ model = accelerator.prepare(model)\n+\n+optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n+\n+- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(model,\n+- optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+- )\n+\n++ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n++ optimizer, train_dataloader, eval_dataloader, lr_scheduler\n++ )\n+\n+```\n+\n+- In case of a single model, if you have created optimizer with multiple parameter groups and called prepare with them together,\n+ then the parameter groups will be lost and the following warning is displayed:\n+ > FSDP Warning: When using FSDP, several parameter groups will be conflated into\n+ > a single one due to nested module wrapping and parameter flattening.\n+ \n+ This is because parameter groups created before wrapping will have no meaning post wrapping due parameter flattening of nested FSDP modules into 1D arrays (which can consume many layers).\n+ For instance, below are the named parameters of FSDP model on GPU 0 (When using 2 GPUs. Around 55M (110M/2) params in 1D arrays as this will have the 1st shard of the parameters). 
\n+ Here, if one has applied no weight decay for [bias, LayerNorm.weight] named parameters of unwrapped BERT model, \n+ it can't be applied to the below FSDP wrapped model as there are no named parameters with either of those strings and \n+ the parameters of those layers are concatenated with parameters of various other layers.\n+ ```\n+ {\n+ '_fsdp_wrapped_module.flat_param': torch.Size([494209]), \n+ '_fsdp_wrapped_module._fpw_module.bert.embeddings.word_embeddings._fsdp_wrapped_module.flat_param': torch.Size([11720448]), \n+ '_fsdp_wrapped_module._fpw_module.bert.encoder._fsdp_wrapped_module.flat_param': torch.Size([42527232])\n+ }\n+ ```\n+\n+\n+- In case of multiple models, it is necessary to prepare the models before creating optimizers else it will throw an error.\n+- Mixed precision is currently not supported with FSDP.\n+\n+For more control, users can leverage the `FullyShardedDataParallelPlugin` wherein they can specify `auto_wrap_policy`, `backward_prefetch` and `ignored_modules`.\n+After creating an instance of this class, users can pass it to the Accelerator class instantiation.\n+For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.\n+\n+[[autodoc]] utils.FullyShardedDataParallelPlugin\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex eb3b24ce9..781330ace 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -32,6 +32,7 @@\n from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers\n from .utils import (\n DeepSpeedPlugin,\n+ FullyShardedDataParallelPlugin,\n LoggerType,\n PrecisionType,\n RNGType,\n@@ -80,6 +81,9 @@ class Accelerator:\n deepspeed_plugin (`DeepSpeedPlugin`, *optional*):\n Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured\n directly using *accelerate config*\n+ fsdp_plugin (`FullyShardedDataParallelPlugin`, *optional*):\n+ Tweak your FSDP related args using this argument. This argument is optional and can be configured directly\n+ using *accelerate config*\n rng_types (list of `str` or [`~utils.RNGType`]):\n The list of random number generators to synchronize at the beginning of each iteration in your prepared\n dataloaders. 
Should be one or several of:\n@@ -127,6 +131,7 @@ def __init__(\n mixed_precision: Union[PrecisionType, str] = None,\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n+ fsdp_plugin: FullyShardedDataParallelPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = None,\n@@ -155,6 +160,15 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n \n+ if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n+ if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n+ raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")\n+ if fsdp_plugin is None: # init from env variables\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ else:\n+ if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):\n+ raise TypeError(\"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\")\n+\n # Kwargs handlers\n self.ddp_handler = None\n self.scaler_handler = None\n@@ -183,6 +197,7 @@ def __init__(\n mixed_precision=mixed_precision,\n cpu=cpu,\n deepspeed_plugin=deepspeed_plugin,\n+ fsdp_plugin=fsdp_plugin,\n _from_accelerator=True,\n **kwargs,\n )\n@@ -326,6 +341,44 @@ def _prepare_one(self, obj, first_pass=False):\n else:\n return obj\n \n+ def _prepare_fsdp(self, *args):\n+ result = []\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ break\n+ optimizers = []\n+\n+ self._schedulers = []\n+ self._models = []\n+ intermediate_result = []\n+ for obj in args:\n+ if isinstance(obj, torch.optim.Optimizer):\n+ if len(obj.param_groups) > 1:\n+ logger.warn(\n+ \"FSDP Warning: When using FSDP, several parameter groups will be conflated into \"\n+ \"a single one due to nested module wrapping and parameter flattening.\"\n+ )\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ obj = self.prepare_optimizer(optimizer)\n+ optimizers.append(obj)\n+ elif isinstance(obj, torch.nn.Module):\n+ self._models.append(obj)\n+ intermediate_result.append(obj)\n+\n+ for obj in intermediate_result:\n+ if isinstance(obj, AcceleratedScheduler):\n+ obj.optimizer = optimizers\n+ for i, opt in enumerate(self._optimizers):\n+ if getattr(obj.scheduler, \"optimizer\", None) == opt.optimizer:\n+ obj.scheduler.optimizer = optimizers[i]\n+ obj.optimizers = [optimizers[i]]\n+ break\n+ self._schedulers.append(obj)\n+ result.append(obj)\n+ self._optimizers = optimizers\n+ return tuple(result)\n+\n def prepare(self, *args):\n \"\"\"\n Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same\n@@ -337,6 +390,25 @@ def prepare(self, *args):\n - `torch.nn.Module`: PyTorch Module\n - `torch.optim.Optimizer`: PyTorch Optimizer\n \"\"\"\n+ if self.distributed_type == DistributedType.FSDP:\n+ model_count = 0\n+ optimizer_present = False\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model_count += 1\n+ if isinstance(obj, torch.optim.Optimizer):\n+ optimizer_present = True\n+ if model_count > 1 and optimizer_present:\n+ raise ValueError(\n+ \"For FSDP to work with multiple models (>1), \"\n+ \"prepare must be called for all the models before optimizers are created\"\n+ )\n+ elif model_count == 1 and optimizer_present:\n+ logger.warn(\n+ \"FSDP Warning: When using FSDP, \"\n+ \"it is efficient and recommended to call prepare for the model before creating the 
optimizer\"\n+ )\n+\n # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will\n # have parameters disconnected from the model (so no training :-( ).\n # If the model and optimizer have parameters on different devices we raise an error.\n@@ -373,16 +445,33 @@ def prepare(self, *args):\n if isinstance(obj, torch.optim.Optimizer):\n obj._switch_parameters(mapping)\n \n+ if self.distributed_type == DistributedType.FSDP and model_count == 1 and optimizer_present:\n+ result = self._prepare_fsdp(*result)\n+\n return result if len(result) > 1 else result[0]\n \n def prepare_model(self, model):\n- if self.device_placement:\n+ if self.device_placement and self.distributed_type != DistributedType.FSDP:\n model = model.to(self.device)\n if self.distributed_type == DistributedType.MULTI_GPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n )\n+ elif self.distributed_type == DistributedType.FSDP:\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+\n+ fsdp_plugin = self.state.fsdp_plugin\n+ model = FSDP(\n+ model,\n+ sharding_strategy=fsdp_plugin.sharding_strategy,\n+ cpu_offload=fsdp_plugin.cpu_offload,\n+ auto_wrap_policy=fsdp_plugin.auto_wrap_policy,\n+ backward_prefetch=fsdp_plugin.backward_prefetch,\n+ ignored_modules=fsdp_plugin.ignored_modules,\n+ )\n+ if not fsdp_plugin.cpu_offload.offload_params:\n+ model.to(self.device)\n elif self.distributed_type == DistributedType.MULTI_CPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n@@ -544,6 +633,12 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n \"\"\"\n Should be used in place of `torch.nn.utils.clip_grad_norm_`.\n \"\"\"\n+ if self.distributed_type == DistributedType.FSDP:\n+ parameters = [p for p in parameters]\n+ for model in self._models:\n+ if parameters == [p for p in model.parameters()]:\n+ model.clip_grad_norm_(max_norm, norm_type)\n+ return\n self.unscale_gradients()\n torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\n \ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex d72680bea..69464ea0b 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -99,6 +99,34 @@ def get_cluster_input():\n default=1,\n )\n \n+ if distributed_type in [DistributedType.MULTI_GPU]:\n+ use_fsdp = _ask_field(\n+ \"Do you want to use FullyShardedDataParallel? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_fsdp:\n+ distributed_type = DistributedType.FSDP\n+ fsdp_config = {}\n+ if distributed_type == DistributedType.FSDP:\n+ fsdp_config[\"sharding_strategy\"] = _ask_field(\n+ \"What should be your sharding strategy ([1] FULL_SHARD, [2] SHARD_GRAD_OP)? [1]: \",\n+ lambda x: int(x),\n+ default=1,\n+ )\n+ fsdp_config[\"offload_params\"] = _ask_field(\n+ \"Do you want to offload parameters and gradients to CPU? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ fsdp_config[\"min_num_params\"] = _ask_field(\n+ \"What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? 
[1e8]: \",\n+ lambda x: int(x),\n+ default=1e8,\n+ )\n+\n if distributed_type == DistributedType.TPU:\n main_training_function = _ask_field(\n \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\n@@ -134,5 +162,6 @@ def get_cluster_input():\n main_process_port=main_process_port,\n main_training_function=main_training_function,\n deepspeed_config=deepspeed_config,\n+ fsdp_config=fsdp_config,\n use_cpu=use_cpu,\n )\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 5af912fc3..fadbe6211 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -136,6 +136,8 @@ class ClusterConfig(BaseConfig):\n \n # args for deepspeed_plugin\n deepspeed_config: dict = None\n+ # args for fsdp\n+ fsdp_config: dict = None\n \n \n @dataclass\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex f2cab1ccb..9fa10da7b 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -51,6 +51,30 @@ def launch_command_parser(subparsers=None):\n action=\"store_true\",\n help=\"Whether to use deepspeed.\",\n )\n+ parser.add_argument(\n+ \"--use_fsdp\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether to use fsdp.\",\n+ )\n+ parser.add_argument(\n+ \"--offload_params\",\n+ default=\"false\",\n+ type=str,\n+ help=\"Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--min_num_params\",\n+ type=int,\n+ default=1e8,\n+ help=\"FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--sharding_strategy\",\n+ type=int,\n+ default=1,\n+ help=\"FSDP's Sharding Strategy. 
(useful only when `use_fsdp` flag is passed).\",\n+ )\n parser.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n@@ -225,7 +249,11 @@ def multi_gpu_launcher(args):\n mixed_precision = \"fp16\"\n \n current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n-\n+ if args.use_fsdp:\n+ current_env[\"USE_FSDP\"] = \"true\"\n+ current_env[\"FSDP_OFFLOAD_PARAMS\"] = str(args.offload_params).lower()\n+ current_env[\"FSDP_MIN_NUM_PARAMS\"] = str(args.min_num_params)\n+ current_env[\"FSDP_SHARDING_STRATEGY\"] = str(args.sharding_strategy)\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n if process.returncode != 0:\n@@ -433,8 +461,8 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \n def launch_command(args):\n # Sanity checks\n- if sum([args.multi_gpu, args.tpu, args.use_deepspeed]) > 1:\n- raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`.\")\n+ if sum([args.multi_gpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:\n+ raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`, `--use_fsdp`.\")\n \n defaults = None\n # Get the default from the config file.\n@@ -444,6 +472,7 @@ def launch_command(args):\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n+ args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n # Update args with the defaults\n for name, attr in defaults.__dict__.items():\n@@ -451,6 +480,8 @@ def launch_command(args):\n for k in defaults.deepspeed_config:\n if getattr(args, k) is None:\n setattr(args, k, defaults.deepspeed_config[k])\n+ for k in defaults.fsdp_config:\n+ setattr(args, k, defaults.fsdp_config[k])\n continue\n \n # Those args are handled separately\n@@ -472,6 +503,8 @@ def launch_command(args):\n # Use the proper launcher\n if args.use_deepspeed and not args.cpu:\n deepspeed_launcher(args)\n+ elif args.use_fsdp and not args.cpu:\n+ multi_gpu_launcher(args)\n elif args.multi_gpu and not args.cpu:\n multi_gpu_launcher(args)\n elif args.tpu and not args.cpu:\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex b0521c7eb..df2e36e9a 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -89,6 +89,7 @@ class DistributedType(str, Enum):\n MULTI_CPU = \"MULTI_CPU\"\n MULTI_GPU = \"MULTI_GPU\"\n DEEPSPEED = \"DEEPSPEED\"\n+ FSDP = \"FSDP\"\n TPU = \"TPU\"\n \n \n@@ -149,6 +150,7 @@ def __init__(\n mixed_precision: str = None,\n cpu: bool = False,\n deepspeed_plugin=None,\n+ fsdp_plugin=None,\n _from_accelerator: bool = False,\n **kwargs,\n ):\n@@ -206,7 +208,13 @@ def __init__(\n self.mixed_precision = (\n parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n )\n-\n+ if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n+ self.distributed_type = DistributedType.FSDP\n+ if self.mixed_precision != \"no\":\n+ raise ValueError(\n+ \"Mixed precision is currently not supported for FSDP. 
Please set `mixed_precision` to `no`.\"\n+ )\n+ self.fsdp_plugin = fsdp_plugin\n elif get_int_from_env([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"], 1) > 1:\n self.distributed_type = DistributedType.MULTI_CPU\n if is_ccl_available() and get_int_from_env([\"CCL_WORKER_COUNT\"], 0) > 0:\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 9bcd87f5f..8101fb7ec 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -12,15 +12,17 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import functools\n import importlib\n import os\n import random\n+import typing\n from collections.abc import Mapping\n from contextlib import contextmanager\n from dataclasses import dataclass, field\n from enum import Enum, EnumMeta\n from functools import update_wrapper\n-from typing import Any, List, Optional, Union\n+from typing import Any, Callable, Iterable, List, Optional, Union\n \n import numpy as np\n import torch\n@@ -738,6 +740,57 @@ def __post_init__(self):\n }\n \n \n+@dataclass\n+class FullyShardedDataParallelPlugin:\n+ \"\"\"\n+ This plugin is used to enable fully sharded data parallelism.\n+ \"\"\"\n+\n+ sharding_strategy: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are [1] FULL_SHARD, [2] SHARD_GRAD_OP\"},\n+ )\n+ backward_prefetch: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are [1] BACKWARD_PRE, [2] BACKWARD_POST\"},\n+ )\n+ auto_wrap_policy: \"typing.Any\" = field(\n+ default=None,\n+ metadata={\"help\": \"A callable specifying a policy to recursively wrap layers with FSDP\"},\n+ )\n+ cpu_offload: Optional[Callable] = field(\n+ default=None,\n+ metadata={\"help\": \"Decides Whether to offload parameters and gradients to CPU.\"},\n+ )\n+ min_num_params: int = field(\n+ default=None, metadata={\"help\": \"FSDP's minimum number of parameters for Default Auto Wrapping.\"}\n+ )\n+ ignored_modules: Optional[Iterable[torch.nn.Module]] = field(\n+ default=None,\n+ metadata={\"help\": \"A list of modules to ignore for FSDP.\"},\n+ )\n+\n+ def __post_init__(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, ShardingStrategy\n+ from torch.distributed.fsdp.wrap import default_auto_wrap_policy\n+\n+ if self.sharding_strategy is None:\n+ self.sharding_strategy = ShardingStrategy(int(os.environ.get(\"FSDP_SHARDING_STRATEGY\", 1)))\n+\n+ if self.cpu_offload is None:\n+ if os.environ.get(\"FSDP_OFFLOAD_PARAMS\", \"false\") == \"true\":\n+ self.cpu_offload = CPUOffload(offload_params=True)\n+ else:\n+ self.cpu_offload = CPUOffload(offload_params=False)\n+\n+ if self.min_num_params is None:\n+ self.min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n+\n+ if self.auto_wrap_policy is None:\n+ if self.min_num_params > 0:\n+ self.auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=self.min_num_params)\n+\n+\n @contextmanager\n def patch_environment(**kwargs):\n \"\"\"\n", "code_comments": [ { "body": "This is a first step but it assumes there is only one model and one optimizer with all the model parameters. 
We can call this method before the model is prepared and prepare it there, which would allow us to identify which parameters are used in which optimizers and then properly re-instantiate them with the right parameters/parameter groups.\r\n\r\nEven better would be to replace the parameters in the existing optimizers without re-instantiating, as we are losing parameter groups etc.\r\n\r\n", "diff_hunk": "@@ -348,6 +349,32 @@ def _prepare_one(self, obj, first_pass=False):\n else:\n return obj\n \n+ def _prepare_fsdp(self, *args):\n+ result = []\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ break\n+ optimizers = []\n+\n+ self._schedulers = []\n+ for obj in args:\n+ if isinstance(obj, torch.optim.Optimizer):\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ obj = self.prepare_optimizer(optimizer)\n+ optimizers.append(obj)\n+ elif isinstance(obj, AcceleratedScheduler):\n+ obj.optimizer = optimizers\n+ for i, opt in enumerate(self._optimizers):\n+ if getattr(obj.scheduler, \"optimizer\", None) == opt.optimizer:\n+ obj.scheduler.optimizer = optimizers[i]\n+ obj.optimizers = [optimizers[i]]\n+ break\n+ self._schedulers.append(obj)\n+ result.append(obj)\n+ self._optimizers = optimizers\n+ return tuple(result)", "from_author": false }, { "body": "Is this still necessary?", "diff_hunk": "@@ -359,6 +386,19 @@ def prepare(self, *args):\n - `torch.nn.Module`: PyTorch Module\n - `torch.optim.Optimizer`: PyTorch Optimizer\n \"\"\"\n+ if self.distributed_type == DistributedType.FSDP:\n+ model_count = 0\n+ optimizer_present = False\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model_count += 1\n+ if isinstance(obj, torch.optim.Optimizer):\n+ optimizer_present = True\n+ if model_count > 1 and optimizer_present:\n+ raise ValueError(\n+ \"For FSDP to work, prepare must be called for all the models before optimizers are created\"\n+ )", "from_author": false }, { "body": "Those two should also be added to the state, and we should get them from it here.", "diff_hunk": "@@ -405,6 +448,21 @@ def prepare_model(self, model):\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n )\n+ elif self.distributed_type == DistributedType.FSDP:\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+ from torch.distributed.fsdp.wrap import default_auto_wrap_policy\n+\n+ auto_wrap_policy = None\n+ cpu_offload = None\n+ if os.environ.get(\"FSDP_OFFLOAD_PARAMS\", \"false\") == \"true\":\n+ cpu_offload = CPUOffload(offload_params=True)\n+\n+ if int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", \"0\")) > 0:\n+ auto_wrap_policy = functools.partial(\n+ default_auto_wrap_policy, min_num_params=int(os.environ[\"FSDP_MIN_NUM_PARAMS\"])\n+ )", "from_author": false }, { "body": "Was it not out in 1.11? 
Let's use the `base_version` attribute to compare to 1.12.0 without anything added ;-)", "diff_hunk": "@@ -472,6 +499,10 @@ def launch_command(args):\n # Use the proper launcher\n if args.use_deepspeed and not args.cpu:\n deepspeed_launcher(args)\n+ elif args.use_fsdp and not args.cpu:\n+ if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n+ raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")", "from_author": false }, { "body": "We should throw an error if we have mixed_precision different from \"no\" instead of silently ignoring what the user said in their config.", "diff_hunk": "@@ -203,10 +204,13 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.mixed_precision = (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n-\n+ if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n+ self.distributed_type = DistributedType.FSDP\n+ self.mixed_precision = \"no\"", "from_author": false }, { "body": "Hello, I've updated the code as per yesterday's offline discussion. ", "diff_hunk": "@@ -348,6 +349,32 @@ def _prepare_one(self, obj, first_pass=False):\n else:\n return obj\n \n+ def _prepare_fsdp(self, *args):\n+ result = []\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ break\n+ optimizers = []\n+\n+ self._schedulers = []\n+ for obj in args:\n+ if isinstance(obj, torch.optim.Optimizer):\n+ optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)\n+ obj = self.prepare_optimizer(optimizer)\n+ optimizers.append(obj)\n+ elif isinstance(obj, AcceleratedScheduler):\n+ obj.optimizer = optimizers\n+ for i, opt in enumerate(self._optimizers):\n+ if getattr(obj.scheduler, \"optimizer\", None) == opt.optimizer:\n+ obj.scheduler.optimizer = optimizers[i]\n+ obj.optimizers = [optimizers[i]]\n+ break\n+ self._schedulers.append(obj)\n+ result.append(obj)\n+ self._optimizers = optimizers\n+ return tuple(result)", "from_author": true }, { "body": "Yes, currently we will only be able to handle the case of single model and hence throwing error in case of multiple models. ", "diff_hunk": "@@ -359,6 +386,19 @@ def prepare(self, *args):\n - `torch.nn.Module`: PyTorch Module\n - `torch.optim.Optimizer`: PyTorch Optimizer\n \"\"\"\n+ if self.distributed_type == DistributedType.FSDP:\n+ model_count = 0\n+ optimizer_present = False\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model_count += 1\n+ if isinstance(obj, torch.optim.Optimizer):\n+ optimizer_present = True\n+ if model_count > 1 and optimizer_present:\n+ raise ValueError(\n+ \"For FSDP to work, prepare must be called for all the models before optimizers are created\"\n+ )", "from_author": true }, { "body": "Done. 
Created FullyShardedDataParallelPlugin for handling the FSDP config.", "diff_hunk": "@@ -405,6 +448,21 @@ def prepare_model(self, model):\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n )\n+ elif self.distributed_type == DistributedType.FSDP:\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+ from torch.distributed.fsdp.wrap import default_auto_wrap_policy\n+\n+ auto_wrap_policy = None\n+ cpu_offload = None\n+ if os.environ.get(\"FSDP_OFFLOAD_PARAMS\", \"false\") == \"true\":\n+ cpu_offload = CPUOffload(offload_params=True)\n+\n+ if int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", \"0\")) > 0:\n+ auto_wrap_policy = functools.partial(\n+ default_auto_wrap_policy, min_num_params=int(os.environ[\"FSDP_MIN_NUM_PARAMS\"])\n+ )", "from_author": true }, { "body": "As mentioned offline, at present the model saving/checkpointing is supported through the nightly version only.", "diff_hunk": "@@ -472,6 +499,10 @@ def launch_command(args):\n # Use the proper launcher\n if args.use_deepspeed and not args.cpu:\n deepspeed_launcher(args)\n+ elif args.use_fsdp and not args.cpu:\n+ if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n+ raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -203,10 +204,13 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.mixed_precision = (\n- parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n- )\n-\n+ if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n+ self.distributed_type = DistributedType.FSDP\n+ self.mixed_precision = \"no\"", "from_author": true }, { "body": "For new code, we try to avoid assert, so please use a test and raise a `TypeError` here.", "diff_hunk": "@@ -155,6 +160,16 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n \n+ if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n+ if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n+ raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")\n+ if fsdp_plugin is None: # init from env variables\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ else:\n+ assert isinstance(\n+ fsdp_plugin, FullyShardedDataParallelPlugin\n+ ), \"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\"", "from_author": false }, { "body": "Split to respect the 119 char limit please :-)", "diff_hunk": "@@ -326,6 +342,43 @@ def _prepare_one(self, obj, first_pass=False):\n else:\n return obj\n \n+ def _prepare_fsdp(self, *args):\n+ result = []\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ break\n+ optimizers = []\n+\n+ self._schedulers = []\n+ self._models = []\n+ intermediate_result = []\n+ for obj in args:\n+ if isinstance(obj, torch.optim.Optimizer):\n+ if len(obj.param_groups) > 1:\n+ logger.warn(\n+ \"FSDP Warning: When using FSDP, several parameter groups will be conflated into a single one due to nested module wrapping and parameter flattening.\"", "from_author": false }, { "body": "The error message could be made clearer then.", "diff_hunk": "@@ 
-359,6 +386,19 @@ def prepare(self, *args):\n - `torch.nn.Module`: PyTorch Module\n - `torch.optim.Optimizer`: PyTorch Optimizer\n \"\"\"\n+ if self.distributed_type == DistributedType.FSDP:\n+ model_count = 0\n+ optimizer_present = False\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model_count += 1\n+ if isinstance(obj, torch.optim.Optimizer):\n+ optimizer_present = True\n+ if model_count > 1 and optimizer_present:\n+ raise ValueError(\n+ \"For FSDP to work, prepare must be called for all the models before optimizers are created\"\n+ )", "from_author": false }, { "body": "There are two consecutive calls to `to` here in this case, would be better to only do one.", "diff_hunk": "@@ -373,16 +443,33 @@ def prepare(self, *args):\n if isinstance(obj, torch.optim.Optimizer):\n obj._switch_parameters(mapping)\n \n+ if self.distributed_type == DistributedType.FSDP:\n+ result = self._prepare_fsdp(*result)\n+\n return result if len(result) > 1 else result[0]\n \n def prepare_model(self, model):\n- if self.device_placement:\n+ if self.device_placement and self.distributed_type != DistributedType.FSDP:\n model = model.to(self.device)\n if self.distributed_type == DistributedType.MULTI_GPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n )\n+ elif self.distributed_type == DistributedType.FSDP:\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+\n+ fsdp_plugin = self.state.fsdp_plugin\n+ model = FSDP(\n+ model,\n+ sharding_strategy=fsdp_plugin.sharding_strategy,\n+ cpu_offload=fsdp_plugin.cpu_offload,\n+ auto_wrap_policy=fsdp_plugin.auto_wrap_policy,\n+ backward_prefetch=fsdp_plugin.backward_prefetch,\n+ ignored_modules=fsdp_plugin.ignored_modules,\n+ ).to(self.device)\n+ if fsdp_plugin.cpu_offload.offload_params:\n+ model.to(\"cpu\")", "from_author": false }, { "body": "```suggestion\r\n \"Mixed precision is currently not supported for FSDP. Please set `mixed_precision` to `no`.\"\r\n```", "diff_hunk": "@@ -206,7 +208,13 @@ def __init__(\n self.mixed_precision = (\n parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n )\n-\n+ if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n+ self.distributed_type = DistributedType.FSDP\n+ if self.mixed_precision != \"no\":\n+ raise ValueError(\n+ \"Mixed precision is currently not supported for FSDPS. Please set `mixed_precision` to `no`.\"", "from_author": false }, { "body": "Why add this here, is this used anywhere? 
The dataclass in itself should be enough to contain the full config?", "diff_hunk": "@@ -738,6 +739,68 @@ def __post_init__(self):\n }\n \n \n+@dataclass\n+class FullyShardedDataParallelPlugin:\n+ \"\"\"\n+ This plugin is used to enable fully sharded data parallelism.\n+ \"\"\"\n+\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy\n+\n+ sharding_strategy: Optional[ShardingStrategy] = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are {}\".format(ShardingStrategy.__members__)},\n+ )\n+ backward_prefetch: Optional[BackwardPrefetch] = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are {}\".format(BackwardPrefetch.__members__)},\n+ )\n+ auto_wrap_policy: Optional[Callable] = field(\n+ default=None,\n+ metadata={\"help\": \"A callable specifying a policy to recursively wrap layers with FSDP\"},\n+ )\n+ cpu_offload: Optional[CPUOffload] = field(\n+ default=None,\n+ metadata={\"help\": \"Decides Whether to offload parameters and gradients to CPU.\"},\n+ )\n+ min_num_params: int = field(\n+ default=None, metadata={\"help\": \"FSDP's minimum number of parameters for Default Auto Wrapping.\"}\n+ )\n+ ignored_modules: Optional[Iterable[torch.nn.Module]] = field(\n+ default=None,\n+ metadata={\"help\": \"A list of modules to ignore for FSDP.\"},\n+ )\n+\n+ def __post_init__(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, ShardingStrategy\n+ from torch.distributed.fsdp.wrap import default_auto_wrap_policy\n+\n+ if self.sharding_strategy is None:\n+ self.sharding_strategy = ShardingStrategy(int(os.environ.get(\"FSDP_SHARDING_STRATEGY\", 1)))\n+\n+ if self.cpu_offload is None:\n+ if os.environ.get(\"FSDP_OFFLOAD_PARAMS\", \"false\") == \"true\":\n+ self.cpu_offload = CPUOffload(offload_params=True)\n+ else:\n+ self.cpu_offload = CPUOffload(offload_params=False)\n+\n+ if self.min_num_params is None:\n+ self.min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n+\n+ if self.auto_wrap_policy is None:\n+ if self.min_num_params > 0:\n+ self.auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=self.min_num_params)\n+\n+ self.fsdp_config = {\n+ \"sharding_strategy\": str(self.sharding_strategy),\n+ \"cpu_offload\": self.cpu_offload.offload_params,\n+ \"min_num_params\": self.min_num_params,\n+ \"is_default_auto_wrap_policy\": self.auto_wrap_policy.__name__ == \"default_auto_wrap_policy\",\n+ \"ignored_modules_present\": self.ignored_modules is not None,\n+ \"backward_prefetch_present\": self.backward_prefetch is not None,\n+ }", "from_author": false }, { "body": "As said offline this is the line making the CI red.", "diff_hunk": "@@ -738,6 +739,68 @@ def __post_init__(self):\n }\n \n \n+@dataclass\n+class FullyShardedDataParallelPlugin:\n+ \"\"\"\n+ This plugin is used to enable fully sharded data parallelism.\n+ \"\"\"\n+\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy", "from_author": false }, { "body": "Got it. Thanks for pointing this out. 
Fixed this.", "diff_hunk": "@@ -738,6 +739,68 @@ def __post_init__(self):\n }\n \n \n+@dataclass\n+class FullyShardedDataParallelPlugin:\n+ \"\"\"\n+ This plugin is used to enable fully sharded data parallelism.\n+ \"\"\"\n+\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy", "from_author": true }, { "body": "It was used sort of \"__repr__\" to be printed when printing the accelerator state. removed this as the dataclass should be the full config.", "diff_hunk": "@@ -738,6 +739,68 @@ def __post_init__(self):\n }\n \n \n+@dataclass\n+class FullyShardedDataParallelPlugin:\n+ \"\"\"\n+ This plugin is used to enable fully sharded data parallelism.\n+ \"\"\"\n+\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy\n+\n+ sharding_strategy: Optional[ShardingStrategy] = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are {}\".format(ShardingStrategy.__members__)},\n+ )\n+ backward_prefetch: Optional[BackwardPrefetch] = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are {}\".format(BackwardPrefetch.__members__)},\n+ )\n+ auto_wrap_policy: Optional[Callable] = field(\n+ default=None,\n+ metadata={\"help\": \"A callable specifying a policy to recursively wrap layers with FSDP\"},\n+ )\n+ cpu_offload: Optional[CPUOffload] = field(\n+ default=None,\n+ metadata={\"help\": \"Decides Whether to offload parameters and gradients to CPU.\"},\n+ )\n+ min_num_params: int = field(\n+ default=None, metadata={\"help\": \"FSDP's minimum number of parameters for Default Auto Wrapping.\"}\n+ )\n+ ignored_modules: Optional[Iterable[torch.nn.Module]] = field(\n+ default=None,\n+ metadata={\"help\": \"A list of modules to ignore for FSDP.\"},\n+ )\n+\n+ def __post_init__(self):\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, ShardingStrategy\n+ from torch.distributed.fsdp.wrap import default_auto_wrap_policy\n+\n+ if self.sharding_strategy is None:\n+ self.sharding_strategy = ShardingStrategy(int(os.environ.get(\"FSDP_SHARDING_STRATEGY\", 1)))\n+\n+ if self.cpu_offload is None:\n+ if os.environ.get(\"FSDP_OFFLOAD_PARAMS\", \"false\") == \"true\":\n+ self.cpu_offload = CPUOffload(offload_params=True)\n+ else:\n+ self.cpu_offload = CPUOffload(offload_params=False)\n+\n+ if self.min_num_params is None:\n+ self.min_num_params = int(os.environ.get(\"FSDP_MIN_NUM_PARAMS\", 0))\n+\n+ if self.auto_wrap_policy is None:\n+ if self.min_num_params > 0:\n+ self.auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=self.min_num_params)\n+\n+ self.fsdp_config = {\n+ \"sharding_strategy\": str(self.sharding_strategy),\n+ \"cpu_offload\": self.cpu_offload.offload_params,\n+ \"min_num_params\": self.min_num_params,\n+ \"is_default_auto_wrap_policy\": self.auto_wrap_policy.__name__ == \"default_auto_wrap_policy\",\n+ \"ignored_modules_present\": self.ignored_modules is not None,\n+ \"backward_prefetch_present\": self.backward_prefetch is not None,\n+ }", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -373,16 +443,33 @@ def prepare(self, *args):\n if isinstance(obj, torch.optim.Optimizer):\n obj._switch_parameters(mapping)\n \n+ if self.distributed_type == DistributedType.FSDP:\n+ result = self._prepare_fsdp(*result)\n+\n return result if len(result) > 1 else result[0]\n \n def prepare_model(self, model):\n- if self.device_placement:\n+ if self.device_placement and self.distributed_type != DistributedType.FSDP:\n 
model = model.to(self.device)\n if self.distributed_type == DistributedType.MULTI_GPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n )\n+ elif self.distributed_type == DistributedType.FSDP:\n+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n+\n+ fsdp_plugin = self.state.fsdp_plugin\n+ model = FSDP(\n+ model,\n+ sharding_strategy=fsdp_plugin.sharding_strategy,\n+ cpu_offload=fsdp_plugin.cpu_offload,\n+ auto_wrap_policy=fsdp_plugin.auto_wrap_policy,\n+ backward_prefetch=fsdp_plugin.backward_prefetch,\n+ ignored_modules=fsdp_plugin.ignored_modules,\n+ ).to(self.device)\n+ if fsdp_plugin.cpu_offload.offload_params:\n+ model.to(\"cpu\")", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -326,6 +342,43 @@ def _prepare_one(self, obj, first_pass=False):\n else:\n return obj\n \n+ def _prepare_fsdp(self, *args):\n+ result = []\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ break\n+ optimizers = []\n+\n+ self._schedulers = []\n+ self._models = []\n+ intermediate_result = []\n+ for obj in args:\n+ if isinstance(obj, torch.optim.Optimizer):\n+ if len(obj.param_groups) > 1:\n+ logger.warn(\n+ \"FSDP Warning: When using FSDP, several parameter groups will be conflated into a single one due to nested module wrapping and parameter flattening.\"", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -155,6 +160,16 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n \n+ if os.environ.get(\"USE_FSDP\", \"false\") == \"true\":\n+ if version.parse(torch.__version__) < version.parse(\"1.12.0.dev20220418+cu113\"):\n+ raise ValueError(\"FSDP requires PyTorch >= 1.12.0.dev20220418+cu113\")\n+ if fsdp_plugin is None: # init from env variables\n+ fsdp_plugin = FullyShardedDataParallelPlugin()\n+ else:\n+ assert isinstance(\n+ fsdp_plugin, FullyShardedDataParallelPlugin\n+ ), \"`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.\"", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -359,6 +386,19 @@ def prepare(self, *args):\n - `torch.nn.Module`: PyTorch Module\n - `torch.optim.Optimizer`: PyTorch Optimizer\n \"\"\"\n+ if self.distributed_type == DistributedType.FSDP:\n+ model_count = 0\n+ optimizer_present = False\n+ for obj in args:\n+ if isinstance(obj, torch.nn.Module):\n+ model_count += 1\n+ if isinstance(obj, torch.optim.Optimizer):\n+ optimizer_present = True\n+ if model_count > 1 and optimizer_present:\n+ raise ValueError(\n+ \"For FSDP to work, prepare must be called for all the models before optimizers are created\"\n+ )", "from_author": true }, { "body": "Can we add the new guide without changing the indentation of the whole file? 
;-)", "diff_hunk": "@@ -1,26 +1,28 @@\n-- sections: \n- - local: index\n- title: πŸ€— Accelerate\n- - local: quicktour\n- title: Quick tour\n- - local: installation\n- title: Installation\n+- sections:\n+ - local: index\n+ title: πŸ€— Accelerate\n+ - local: quicktour\n+ title: Quick tour\n+ - local: installation\n+ title: Installation\n title: Get started\n - sections:\n- - local: sagemaker\n- title: Amazon SageMaker\n+ - local: sagemaker\n+ title: Amazon SageMaker\n title: Guides\n - sections:\n- - local: accelerator\n- title: Accelerator\n- - local: launcher\n- title: Notebook Launcher\n- - local: kwargs\n- title: Kwargs Handlers\n- - local: internal\n- title: Internals\n- - local: checkpoint\n- title: Checkpointing\n- - local: tracking\n- title: Experiment Tracking\n+ - local: accelerator\n+ title: Accelerator\n+ - local: launcher\n+ title: Notebook Launcher\n+ - local: kwargs\n+ title: Kwargs Handlers\n+ - local: internal\n+ title: Internals\n+ - local: checkpoint\n+ title: Checkpointing\n+ - local: tracking\n+ title: Experiment Tracking\n+ - local: fsdp\n+ title: Fully Sharded Data Parallel", "from_author": false }, { "body": "I would put this before the API doc (so before the line [[autodoc]] above).", "diff_hunk": "@@ -0,0 +1,107 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Fully Sharded Data Parallel\n+\n+To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.\n+This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.\n+To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).\n+We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature.\n+All you need to do is enable it through the config.\n+\n+## How it works out the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. 
This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example (from the root of the repo) with FSDP enabled:\n+\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: FSDP\n+fsdp_config:\n+ min_num_params: 2000\n+ offload_params: false\n+ sharding_strategy: 1\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/nlp_example.py\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`Sharding Strategy`: [1] FULL_SHARD, [2] SHARD_GRAD_OP\n+`Min Num Params`: FSDP\\'s minimum number of parameters for Default Auto Wrapping.\n+`Offload Params`: Decides Whether to offload parameters and gradients to CPU.\n+```\n+\n+For more control, users can leverage the `FullyShardedDataParallelPlugin` wherein they can specify `auto_wrap_policy`, `backward_prefetch` and `ignored_modules`.\n+After creating an instance of this class, users can pass it to the Accelerator class instantiation.\n+For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.\n+\n+[[autodoc]] utils.FullyShardedDataParallelPlugin\n+\n+## Few caveats to be aware of:", "from_author": false }, { "body": "You should highlight that FSDP throws away any parameter groups anyway.", "diff_hunk": "@@ -0,0 +1,107 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Fully Sharded Data Parallel\n+\n+To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.\n+This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.\n+To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).\n+We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature.\n+All you need to do is enable it through the config.\n+\n+## How it works out the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. 
This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example (from the root of the repo) with FSDP enabled:\n+\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: FSDP\n+fsdp_config:\n+ min_num_params: 2000\n+ offload_params: false\n+ sharding_strategy: 1\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/nlp_example.py\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`Sharding Strategy`: [1] FULL_SHARD, [2] SHARD_GRAD_OP\n+`Min Num Params`: FSDP\\'s minimum number of parameters for Default Auto Wrapping.\n+`Offload Params`: Decides Whether to offload parameters and gradients to CPU.\n+```\n+\n+For more control, users can leverage the `FullyShardedDataParallelPlugin` wherein they can specify `auto_wrap_policy`, `backward_prefetch` and `ignored_modules`.\n+After creating an instance of this class, users can pass it to the Accelerator class instantiation.\n+For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.\n+\n+[[autodoc]] utils.FullyShardedDataParallelPlugin\n+\n+## Few caveats to be aware of:\n+\n+- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.\n+ Due to this, any optimizer created before model wrapping gets broken and occupies more memory.\n+ Hence, it is highly recommended and efficient to prepare model before creating optimizer.\n+ `Accelerate` will automatically wrap the model and create an optimizer for you in case of single model with a warning message.\n+ > FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before creating the optimizer\n+\n+However, below is the recommended way to prepare model and optimizer while using FSDP:\n+\n+```diff\n+model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n++ model = accelerator.prepare(model)\n+\n+optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n+\n+- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(model,\n+- optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+- )\n+\n++ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n++ optimizer, train_dataloader, eval_dataloader, lr_scheduler\n++ )\n+\n+```\n+\n+- In case of a single model, if you have created optimizer with multiple optimizer groups and called prepare with them together,\n+ then the optimizer groups will be lost and the following warning is displayed:\n+\n+ > FSDP Warning: When using FSDP, several parameter groups will be conflated into\n+ > a single one due to nested module wrapping and parameter flattening.", "from_author": false }, { "body": "Done.", "diff_hunk": "@@ -0,0 +1,107 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Fully Sharded Data Parallel\n+\n+To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.\n+This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.\n+To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).\n+We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature.\n+All you need to do is enable it through the config.\n+\n+## How it works out the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example (from the root of the repo) with FSDP enabled:\n+\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: FSDP\n+fsdp_config:\n+ min_num_params: 2000\n+ offload_params: false\n+ sharding_strategy: 1\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/nlp_example.py\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`Sharding Strategy`: [1] FULL_SHARD, [2] SHARD_GRAD_OP\n+`Min Num Params`: FSDP\\'s minimum number of parameters for Default Auto Wrapping.\n+`Offload Params`: Decides Whether to offload parameters and gradients to CPU.\n+```\n+\n+For more control, users can leverage the `FullyShardedDataParallelPlugin` wherein they can specify `auto_wrap_policy`, `backward_prefetch` and `ignored_modules`.\n+After creating an instance of this class, users can pass it to the Accelerator class instantiation.\n+For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.\n+\n+[[autodoc]] utils.FullyShardedDataParallelPlugin\n+\n+## Few caveats to be aware of:", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -1,26 +1,28 @@\n-- sections: \n- - local: index\n- title: πŸ€— Accelerate\n- - local: quicktour\n- title: Quick tour\n- - local: installation\n- title: Installation\n+- sections:\n+ - local: index\n+ title: πŸ€— Accelerate\n+ - local: quicktour\n+ title: Quick tour\n+ - local: installation\n+ title: Installation\n title: Get started\n - sections:\n- - local: sagemaker\n- title: Amazon SageMaker\n+ - local: sagemaker\n+ title: Amazon SageMaker\n title: Guides\n - sections:\n- - local: accelerator\n- title: Accelerator\n- - local: launcher\n- title: Notebook Launcher\n- - local: kwargs\n- title: Kwargs Handlers\n- - local: internal\n- title: Internals\n- - local: checkpoint\n- title: 
Checkpointing\n- - local: tracking\n- title: Experiment Tracking\n+ - local: accelerator\n+ title: Accelerator\n+ - local: launcher\n+ title: Notebook Launcher\n+ - local: kwargs\n+ title: Kwargs Handlers\n+ - local: internal\n+ title: Internals\n+ - local: checkpoint\n+ title: Checkpointing\n+ - local: tracking\n+ title: Experiment Tracking\n+ - local: fsdp\n+ title: Fully Sharded Data Parallel", "from_author": true }, { "body": "Done.", "diff_hunk": "@@ -0,0 +1,107 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Fully Sharded Data Parallel\n+\n+To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.\n+This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.\n+To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).\n+We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature.\n+All you need to do is enable it through the config.\n+\n+## How it works out the box\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. 
This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example (from the root of the repo) with FSDP enabled:\n+\n+```bash\n+compute_environment: LOCAL_MACHINE\n+deepspeed_config: {}\n+distributed_type: FSDP\n+fsdp_config:\n+ min_num_params: 2000\n+ offload_params: false\n+ sharding_strategy: 1\n+machine_rank: 0\n+main_process_ip: null\n+main_process_port: null\n+main_training_function: main\n+mixed_precision: 'no'\n+num_machines: 1\n+num_processes: 2\n+use_cpu: false\n+```\n+\n+```bash\n+accelerate launch examples/nlp_example.py\n+```\n+\n+Currently, `Accelerate` supports following config through the CLI:\n+\n+```bash\n+`Sharding Strategy`: [1] FULL_SHARD, [2] SHARD_GRAD_OP\n+`Min Num Params`: FSDP\\'s minimum number of parameters for Default Auto Wrapping.\n+`Offload Params`: Decides Whether to offload parameters and gradients to CPU.\n+```\n+\n+For more control, users can leverage the `FullyShardedDataParallelPlugin` wherein they can specify `auto_wrap_policy`, `backward_prefetch` and `ignored_modules`.\n+After creating an instance of this class, users can pass it to the Accelerator class instantiation.\n+For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.\n+\n+[[autodoc]] utils.FullyShardedDataParallelPlugin\n+\n+## Few caveats to be aware of:\n+\n+- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.\n+ Due to this, any optimizer created before model wrapping gets broken and occupies more memory.\n+ Hence, it is highly recommended and efficient to prepare model before creating optimizer.\n+ `Accelerate` will automatically wrap the model and create an optimizer for you in case of single model with a warning message.\n+ > FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before creating the optimizer\n+\n+However, below is the recommended way to prepare model and optimizer while using FSDP:\n+\n+```diff\n+model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n++ model = accelerator.prepare(model)\n+\n+optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)\n+\n+- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(model,\n+- optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+- )\n+\n++ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n++ optimizer, train_dataloader, eval_dataloader, lr_scheduler\n++ )\n+\n+```\n+\n+- In case of a single model, if you have created optimizer with multiple optimizer groups and called prepare with them together,\n+ then the optimizer groups will be lost and the following warning is displayed:\n+\n+ > FSDP Warning: When using FSDP, several parameter groups will be conflated into\n+ > a single one due to nested module wrapping and parameter flattening.", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/321", "pr_id": 914156436 }, { "diff": "diff --git a/docs/source/tracking.mdx b/docs/source/tracking.mdx\nindex 
959317887..6d4ca437c 100644\n--- a/docs/source/tracking.mdx\n+++ b/docs/source/tracking.mdx\n@@ -28,12 +28,12 @@ Currently `Accelerate` supports three trackers out-of-the-box:\n \n To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerate`]:\n ```python\n-from accelerate import Accelerate\n+from accelerate import Accelerator\n from accelerate.utils import LoggerType\n \n-accelerator = Accelerate(log_with=\"all\") # For all available trackers in the environment\n-accelerator = Accelerate(log_with=\"wandb\")\n-accelerator = Accelerate(log_with=[\"wandb\", LoggerType.TENSORBOARD])\n+accelerator = Accelerator(log_with=\"all\") # For all available trackers in the environment\n+accelerator = Accelerator(log_with=\"wandb\")\n+accelerator = Accelerator(log_with=[\"wandb\", LoggerType.TENSORBOARD])\n ```\n \n At the start of your experiment [`~Accelerator.init_trackers`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged:\n@@ -123,14 +123,14 @@ be used with the API:\n \n ```python\n tracker = MyCustomTracker(\"some_run_name\")\n-accelerator = Accelerate(log_with=tracker)\n+accelerator = Accelerator(log_with=tracker)\n ```\n \n These also can be mixed with existing trackers, including with `\"all\"`:\n \n ```python\n tracker = MyCustomTracker(\"some_run_name\")\n-accelerator = Accelerate(log_with=[tracker, \"all\"])\n+accelerator = Accelerator(log_with=[tracker, \"all\"])\n ```\n \n ## When a wrapper cannot work\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/320", "pr_id": 913898567 }, { "diff": "diff --git a/examples/by_feature/multi_process_metrics.py b/examples/by_feature/multi_process_metrics.py\nnew file mode 100644\nindex 000000000..abba1a746\n--- /dev/null\n+++ b/examples/by_feature/multi_process_metrics.py\n@@ -0,0 +1,215 @@\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to properly calculate the metrics on the\n+# validation dataset when in a distributed system, and builds off the\n+# `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# To help focus on the differences in the code, building `DataLoaders`\n+# was refactored into its own function.\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", 
return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ model.eval()\n+ samples_seen = 0\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ # New Code #\n+ # First we check if it's a distributed system\n+ if accelerator.num_processes > 1:\n+ # Then see if we're on the last batch of our eval dataloader\n+ if step == len(eval_dataloader):\n+ # 
Last batch needs to be truncated on distributed systems as it contains additional samples\n+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]\n+ references = references[: len(eval_dataloader.dataset) - samples_seen]\n+ else:\n+ # Otherwise we add the number of samples seen\n+ samples_seen += references.shape[0]\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex 354412aa5..3ecd3bf91 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -33,13 +33,14 @@\n if SRC_DIRS is not None:\n import checkpointing\n import cross_validation\n+ import multi_process_metrics\n import tracking\n \n # DataLoaders built from `test_samples/MRPC` for quick testing\n # Should mock `{script_name}.get_dataloaders` via:\n # @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n \n-EXCLUDE_EXAMPLES = [\"cross_validation.py\"]\n+EXCLUDE_EXAMPLES = [\"cross_validation.py\", \"multi_process_metrics.py\"]\n \n \n def mocked_dataloaders(accelerator, batch_size: int = 16):\n@@ -182,18 +183,6 @@ def test_checkpointing_by_steps(self):\n checkpointing.main()\n self.assertTrue(os.path.exists(os.path.join(tmpdir, \"step_2\")))\n \n- @mock.patch(\"tracking.get_dataloaders\", mocked_dataloaders)\n- def test_tracking(self):\n- with tempfile.TemporaryDirectory() as tmpdir:\n- testargs = f\"\"\"\n- tracking.py\n- --with_tracking\n- --logging_dir {tmpdir}\n- \"\"\".split()\n- with mock.patch.object(sys, \"argv\", testargs):\n- tracking.main()\n- self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\n-\n @slow\n def test_cross_validation(self):\n testargs = \"\"\"\n@@ -205,3 +194,21 @@ def test_cross_validation(self):\n cross_validation.main()\n call = mocked_print.mock_calls[-1]\n self.assertGreaterEqual(call.args[1][\"accuracy\"], 0.75)\n+\n+ @mock.patch(\"multi_process_metrics.get_dataloaders\", mocked_dataloaders)\n+ def test_multi_process_metrics(self):\n+ testargs = [\"multi_process_metrics.py\"]\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ multi_process_metrics.main()\n+\n+ @mock.patch(\"tracking.get_dataloaders\", mocked_dataloaders)\n+ def test_tracking(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ testargs = f\"\"\"\n+ tracking.py\n+ --with_tracking\n+ --logging_dir {tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ tracking.main()\n+ self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\n", "code_comments": [ { "body": "Should be named `multi_process_metrics` as the problem arises as soon as you have several processes, not several nodes.", 
"diff_hunk": "@@ -0,0 +1,215 @@\n+# coding=utf-8", "from_author": false }, { "body": "```suggestion\r\n# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\r\n```", "diff_hunk": "@@ -0,0 +1,215 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Note: This is just an initial to make sure the format and whatnot looks right and then all the other examples will follow suite :) ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/319", "pr_id": 913416186 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex a79012d5c..eb3b24ce9 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -725,6 +725,13 @@ def free_memory(self):\n gc.collect()\n torch.cuda.empty_cache()\n \n+ def clear(self):\n+ \"\"\"\n+ Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the\n+ garbage collector. You should call this method between two trainings with different models/optimizers.\n+ \"\"\"\n+ self.free_memory()\n+\n def _get_named_parameters(self, *args):\n named_parameters = {}\n for obj in args:\n", "code_comments": [ { "body": "```suggestion\r\n Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the garbage\r\n```", "diff_hunk": "@@ -725,6 +725,13 @@ def free_memory(self):\n gc.collect()\n torch.cuda.empty_cache()\n \n+ def clear(self):\n+ \"\"\"\n+ Alias for `Accelerate.free_memory`, releases all references to the internal objects stored and call the garbage", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/318", "pr_id": 913400743 }, { "diff": "diff --git a/examples/by_feature/README.md b/examples/by_feature/README.md\nindex 40ffa3c1e..f0db09937 100644\n--- a/examples/by_feature/README.md\n+++ b/examples/by_feature/README.md\n@@ -54,3 +54,15 @@ These arguments should be added at the end of any method for starting the python\n ```bash\n accelerate launch ./tracking.py --with_tracking\n ```\n+\n+### Cross Validation (`cross_validation.py`)\n+\n+- Shows how to use `Accelerator.free_memory` and run cross validation efficiently with `datasets`.\n+- Arguments available:\n+ - `num_folds`, the number of folds the training dataset should be split into.\n+\n+These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:\n+\n+```bash\n+accelerate launch ./cross_validation.py --num_folds 2\n+```\ndiff --git a/examples/by_feature/cross_validation.py b/examples/by_feature/cross_validation.py\nnew file mode 100644\nindex 000000000..9fdc3fe72\n--- /dev/null\n+++ b/examples/by_feature/cross_validation.py\n@@ -0,0 +1,275 @@\n+# coding=utf-8\n+# Copyright 2022 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import DatasetDict, load_dataset, load_metric\n+\n+# New Code #\n+# We'll be using StratifiedKFold for this example\n+from sklearn.model_selection import StratifiedKFold\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing how to perform Cross Validation,\n+# and builds off the `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# To help focus on the differences in the code, building `DataLoaders`\n+# was refactored into its own function.\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+# New Code #\n+# We need a different `get_dataloaders` function that will build dataloaders by indexs\n+\n+\n+def get_fold_dataloaders(\n+ accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16\n+):\n+ \"\"\"\n+ Gets a set of train, valid, and test dataloaders for a particular fold\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ The main `Accelerator` object\n+ train_idxs (list of `int`):\n+ The split indicies for the training dataset\n+ valid_idxs (list of `int`):\n+ The split indicies for the validation dataset\n+ batch_size (`int`):\n+ The size of the minibatch. 
Default is 16\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = DatasetDict(\n+ {\n+ \"train\": dataset[\"train\"].select(train_idxs),\n+ \"validation\": dataset[\"train\"].select(valid_idxs),\n+ \"test\": dataset[\"validation\"],\n+ }\n+ )\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ test_dataloader = DataLoader(\n+ tokenized_datasets[\"test\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader, test_dataloader\n+\n+\n+def training_function(config, args):\n+ # New Code #\n+ test_labels = None\n+ test_predictions = []\n+ # Download the dataset\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ # Create our splits\n+ kfold = StratifiedKFold(n_splits=int(args.num_folds))\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+\n+ # New Code #\n+ # Create our folds:\n+ folds = kfold.split(np.zeros(datasets[\"train\"].num_rows), datasets[\"train\"][\"label\"])\n+\n+ # Iterate over them\n+ for train_idxs, valid_idxs in folds:\n+ train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(\n+ accelerator,\n+ datasets,\n+ train_idxs,\n+ valid_idxs,\n+ )\n+ if test_labels is None:\n+ test_labels = datasets[\"validation\"][\"label\"]\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if 
you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+\n+ # New Code #\n+ # We also run predictions on the test set at the very end\n+ fold_predictions = []\n+ for step, batch in enumerate(test_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ fold_predictions.append(predictions.cpu())\n+ metric.add_batch(\n+ predictions=predictions.argmax(dim=-1),\n+ references=references,\n+ )\n+ test_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ test_predictions.append(torch.cat(fold_predictions, dim=0))\n+ # We now need to release all our memory and get rid of the current model, optimizer, etc\n+ accelerator.free_memory()\n+ # New Code #\n+ # Finally we check the accuracy of our folded results:\n+ preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(config[\"n_splits\"])).argmax(dim=-1)\n+ test_metric = metric.compute(predictions=preds, references=test_labels)\n+ accelerator.print(\"Average test metrics from all folds:\", test_metric)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ # New Code #\n+ parser.add_argument(\"--num_folds\", type=int, default=3, help=\"The number of splits to perform across the dataset\")\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 6a9e66d6a..e348e161f 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -18,6 +18,7 @@\n import sys\n import tempfile\n import unittest\n+from distutils.util import strtobool\n from pathlib import Path\n from typing import List, Union\n from unittest import mock\n@@ -28,6 +29,36 @@\n from ..utils import gather, is_tensorflow_available\n \n \n+def parse_flag_from_env(key, default=False):\n+ try:\n+ value = os.environ[key]\n+ except KeyError:\n+ # KEY isn't set, default to `default`.\n+ _value = default\n+ else:\n+ # KEY is set, convert it to True or False.\n+ try:\n+ _value = strtobool(value)\n+ except ValueError:\n+ # More values are supported, but let's keep the message simple.\n+ raise ValueError(f\"If set, {key} must be yes or no.\")\n+ return _value\n+\n+\n+_run_slow_tests = parse_flag_from_env(\"RUN_SLOW\", default=False)\n+\n+\n+def slow(test_case):\n+ \"\"\"\n+ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a\n+ truthy value to run them.\n+ \"\"\"\n+ if not _run_slow_tests:\n+ return unittest.skip(\"test is slow\")(test_case)\n+ else:\n+ return test_case\n+\n+\n class TempDirTestCase(unittest.TestCase):\n \"\"\"\n A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nindex d60d7c08f..354412aa5 100644\n--- a/tests/test_examples.py\n+++ b/tests/test_examples.py\n@@ -22,6 +22,7 @@\n \n from accelerate import DistributedType\n from accelerate.test_utils.examples import compare_against_test\n+from accelerate.test_utils.testing import slow\n from datasets import load_dataset\n from transformers import AutoTokenizer\n \n@@ -31,12 +32,15 @@\n \n if SRC_DIRS is not None:\n import checkpointing\n+ import cross_validation\n import tracking\n \n # DataLoaders built from `test_samples/MRPC` for quick testing\n # Should mock `{script_name}.get_dataloaders` via:\n # @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n \n+EXCLUDE_EXAMPLES = [\"cross_validation.py\"]\n+\n \n def mocked_dataloaders(accelerator, batch_size: int = 16):\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n@@ -117,21 +121,22 @@ def one_complete_example(\n by_feature_path = os.path.abspath(os.path.join(\"examples\", \"by_feature\"))\n examples_path = os.path.abspath(\"examples\")\n for item in os.listdir(by_feature_path):\n- item_path = os.path.join(by_feature_path, item)\n- if os.path.isfile(item_path) and \".py\" in item_path:\n- with self.subTest(\n- tested_script=complete_file_name,\n- feature_script=item,\n- tested_section=\"main()\" if parser_only else \"training_function()\",\n- ):\n- diff = compare_against_test(\n- os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename\n- )\n- diff = \"\\n\".join(diff)\n- if 
special_strings is not None:\n- for string in special_strings:\n- diff = diff.replace(string, \"\")\n- self.assertEqual(diff, \"\")\n+ if item not in EXCLUDE_EXAMPLES:\n+ item_path = os.path.join(by_feature_path, item)\n+ if os.path.isfile(item_path) and \".py\" in item_path:\n+ with self.subTest(\n+ tested_script=complete_file_name,\n+ feature_script=item,\n+ tested_section=\"main()\" if parser_only else \"training_function()\",\n+ ):\n+ diff = compare_against_test(\n+ os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename\n+ )\n+ diff = \"\\n\".join(diff)\n+ if special_strings is not None:\n+ for string in special_strings:\n+ diff = diff.replace(string, \"\")\n+ self.assertEqual(diff, \"\")\n \n def test_nlp_examples(self):\n self.one_complete_example(\"complete_nlp_example.py\", True)\n@@ -188,3 +193,15 @@ def test_tracking(self):\n with mock.patch.object(sys, \"argv\", testargs):\n tracking.main()\n self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\n+\n+ @slow\n+ def test_cross_validation(self):\n+ testargs = \"\"\"\n+ cross_validation.py\n+ --num_folds 2\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ with mock.patch(\"accelerate.Accelerator.print\") as mocked_print:\n+ cross_validation.main()\n+ call = mocked_print.mock_calls[-1]\n+ self.assertGreaterEqual(call.args[1][\"accuracy\"], 0.75)\n", "code_comments": [], "context": [ { "body": "<s>Still todo is update the script docs, but code wise it's good</s> \r\n\r\nDone!", "from_author": true }, { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/317", "pr_id": 913389268 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 514b3a4f4..a79012d5c 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -29,7 +29,7 @@\n from .optimizer import AcceleratedOptimizer\n from .scheduler import AcceleratedScheduler\n from .state import AcceleratorState, DistributedType, is_deepspeed_available\n-from .tracking import CometMLTracker, GeneralTracker, TensorBoardTracker, WandBTracker, get_available_trackers\n+from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers\n from .utils import (\n DeepSpeedPlugin,\n LoggerType,\n@@ -39,9 +39,6 @@\n extract_model_from_parallel,\n gather,\n get_pretty_name,\n- is_comet_ml_available,\n- is_tensorboard_available,\n- is_wandb_available,\n pad_across_processes,\n save,\n wait_for_everyone,\n@@ -132,32 +129,13 @@ def __init__(\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n- logging_dir: Optional[Union[str, os.PathLike]] = \"\",\n+ logging_dir: Optional[Union[str, os.PathLike]] = None,\n dispatch_batches: Optional[bool] = None,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n- loggers = []\n- if log_with is not None:\n- if not isinstance(log_with, (list, tuple)):\n- log_with = [log_with]\n- logger.debug(f\"{log_with}\")\n- if \"all\" in log_with or LoggerType.ALL in log_with:\n- loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n- else:\n- for log_type in log_with:\n- if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):\n- raise ValueError(\n- f\"Unsupported 
logging capability: {log_type}. Choose between {LoggerType.list()}\"\n- )\n- if issubclass(type(log_type), GeneralTracker):\n- loggers.append(log_type)\n- else:\n- log_type = LoggerType(log_type)\n- if log_type not in loggers:\n- loggers.append(log_type)\n- self.log_with = loggers\n self.logging_dir = logging_dir\n+ self.log_with = filter_trackers(log_with, self.logging_dir)\n \n if mixed_precision is not None:\n mixed_precision = str(mixed_precision)\n@@ -644,12 +622,13 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n if issubclass(type(tracker), GeneralTracker):\n # Custom trackers are already initialized\n self.trackers.append(tracker)\n- elif str(tracker).lower() == \"tensorboard\" and is_tensorboard_available():\n- self.trackers.append(TensorBoardTracker(project_name, self.logging_dir))\n- elif str(tracker).lower() == \"wandb\" and is_wandb_available():\n- self.trackers.append(WandBTracker(project_name))\n- elif str(tracker).lower() == \"comet_ml\" and is_comet_ml_available():\n- self.trackers.append(CometMLTracker(project_name))\n+ else:\n+ tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)]\n+ if getattr(tracker_init, \"requires_logging_directory\"):\n+ # We can skip this check since it was done in `__init__`\n+ self.trackers.append(tracker_init(project_name, self.logging_dir))\n+ else:\n+ self.trackers.append(tracker_init(project_name))\n if config is not None:\n for tracker in self.trackers:\n tracker.store_init_configuration(config)\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 8d496a3d1..de3a19310 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -17,8 +17,8 @@\n \n import logging\n import os\n-from abc import ABCMeta, abstractmethod\n-from typing import Optional, Union\n+from abc import ABCMeta, abstractmethod, abstractproperty\n+from typing import List, Optional, Union\n \n from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n \n@@ -54,6 +54,13 @@ class GeneralTracker(object, metaclass=ABCMeta):\n A base Tracker class to be used for all logging integration implementations.\n \"\"\"\n \n+ @abstractproperty\n+ def requires_logging_directory(self):\n+ \"\"\"\n+ Whether the logger requires a directory to store their logs. 
Should either return `True` or `False`.\n+ \"\"\"\n+ pass\n+\n @abstractmethod\n def store_init_configuration(self, values: dict):\n \"\"\"\n@@ -100,7 +107,9 @@ class TensorBoardTracker(GeneralTracker):\n Location for TensorBoard logs to be stored.\n \"\"\"\n \n- def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \"\"):\n+ requires_logging_directory = True\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]]):\n self.run_name = run_name\n self.logging_dir = os.path.join(logging_dir, run_name)\n self.writer = tensorboard.SummaryWriter(self.logging_dir)\n@@ -157,6 +166,8 @@ class WandBTracker(GeneralTracker):\n The name of the experiment run.\n \"\"\"\n \n+ requires_logging_directory = False\n+\n def __init__(self, run_name: str):\n self.run_name = run_name\n self.run = wandb.init(self.run_name)\n@@ -209,6 +220,8 @@ class CometMLTracker(GeneralTracker):\n The name of the experiment run.\n \"\"\"\n \n+ requires_logging_directory = False\n+\n def __init__(self, run_name: str):\n self.run_name = run_name\n self.writer = Experiment(project_name=run_name)\n@@ -250,3 +263,59 @@ def finish(self):\n \"\"\"\n self.writer.end()\n logger.info(\"CometML run closed\")\n+\n+\n+LOGGER_TYPE_TO_CLASS = {\"tensorboard\": TensorBoardTracker, \"wandb\": WandBTracker, \"comet_ml\": CometMLTracker}\n+\n+\n+def filter_trackers(\n+ log_with: List[Union[str, LoggerType, GeneralTracker]], logging_dir: Union[str, os.PathLike] = None\n+):\n+ \"\"\"\n+ Takes in a list of potential tracker types and checks that:\n+ - The tracker wanted is available in that environment\n+ - Filters out repeats of tracker types\n+ - If `all` is in `log_with`, will return all trackers in the environment\n+ - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None`\n+\n+ Args:\n+ log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):\n+ A list of loggers to be setup for experiment tracking. Should be one or several of:\n+\n+ - `\"all\"`\n+ - `\"tensorboard\"`\n+ - `\"wandb\"`\n+ - `\"comet_ml\"`\n+ If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them. Can also\n+ accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n+ logging_dir (`str`, `os.PathLike`, *optional*):\n+ A path to a directory for storing logs of locally-compatible loggers.\n+ \"\"\"\n+ loggers = []\n+ if log_with is not None:\n+ if not isinstance(log_with, (list, tuple)):\n+ log_with = [log_with]\n+ logger.debug(f\"{log_with}\")\n+ if \"all\" in log_with or LoggerType.ALL in log_with:\n+ loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n+ else:\n+ for log_type in log_with:\n+ if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):\n+ raise ValueError(f\"Unsupported logging capability: {log_type}. 
Choose between {LoggerType.list()}\")\n+ if issubclass(type(log_type), GeneralTracker):\n+ loggers.append(log_type)\n+ else:\n+ log_type = LoggerType(log_type)\n+ if log_type not in loggers:\n+ if log_type in get_available_trackers():\n+ tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)]\n+ if getattr(tracker_init, \"requires_logging_directory\"):\n+ if logging_dir is None:\n+ raise ValueError(\n+ f\"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in.\"\n+ )\n+ loggers.append(log_type)\n+ else:\n+ logger.info(f\"Tried adding logger {log_type}, but package is unavailable in the system.\")\n+\n+ return loggers\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex b3974afe2..c5b940a46 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -103,6 +103,12 @@ def test_log(self):\n self.assertEqual(iteration, values[\"iteration\"])\n self.assertEqual(my_text, values[\"my_text\"])\n \n+ def test_logging_dir(self):\n+ with self.assertRaisesRegex(ValueError, \"Logging with `tensorboard` requires a `logging_dir`\"):\n+ _ = Accelerator(log_with=\"tensorboard\")\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ _ = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n+\n \n @mock.patch.dict(os.environ, {\"WANDB_MODE\": \"offline\"})\n class WandBTrackingTest(TempDirTestCase, MockingTestCase):\n@@ -237,6 +243,8 @@ class MyCustomTracker(GeneralTracker):\n \"some_string\",\n ]\n \n+ requires_logging_directory = False\n+\n def __init__(self, dir: str):\n self.f = open(f\"{dir}/log.csv\", \"w+\")\n self.writer = csv.DictWriter(self.f, fieldnames=self._col_names)\n", "code_comments": [ { "body": "Why not use a class attribute directly? We could set a default then (I don't think this actually defaults to False).", "diff_hunk": "@@ -54,6 +54,14 @@ class GeneralTracker(object, metaclass=ABCMeta):\n A base Tracker class to be used for all logging integration implementations.\n \"\"\"\n \n+ @abstractproperty\n+ def requires_logging_directory(self):\n+ \"\"\"\n+ Whether the logger requires a directory to store their logs. Should either return `True` or `False`. The\n+ default behavior is `False`\n+ \"\"\"\n+ pass", "from_author": false }, { "body": "Very nice!", "diff_hunk": "@@ -644,12 +653,13 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n if issubclass(type(tracker), GeneralTracker):\n # Custom trackers are already initialized\n self.trackers.append(tracker)\n- elif str(tracker).lower() == \"tensorboard\" and is_tensorboard_available():\n- self.trackers.append(TensorBoardTracker(project_name, self.logging_dir))\n- elif str(tracker).lower() == \"wandb\" and is_wandb_available():\n- self.trackers.append(WandBTracker(project_name))\n- elif str(tracker).lower() == \"comet_ml\" and is_comet_ml_available():\n- self.trackers.append(CometMLTracker(project_name))\n+ else:", "from_author": false }, { "body": "This won't right. (This is also evidence of me going back and forth with it). Having it like this IMO forces implementations to think about how they want the behavior to go, and have it explicit in the source code for users interested in the code to see. 
\r\n\r\nFor example, forcing it to be an abstract property/function, W&B could eventually make it a conditional where if it's in `offline_mode` (by checking the env var), then it would be True, otherwise False.\r\n\r\nBut, I'm not against just making it a normal class attr directly", "diff_hunk": "@@ -54,6 +54,14 @@ class GeneralTracker(object, metaclass=ABCMeta):\n A base Tracker class to be used for all logging integration implementations.\n \"\"\"\n \n+ @abstractproperty\n+ def requires_logging_directory(self):\n+ \"\"\"\n+ Whether the logger requires a directory to store their logs. Should either return `True` or `False`. The\n+ default behavior is `False`\n+ \"\"\"\n+ pass", "from_author": true }, { "body": "In that case, you should remove the default behavior mention in the docstring, as there is no default :-)", "diff_hunk": "@@ -54,6 +54,14 @@ class GeneralTracker(object, metaclass=ABCMeta):\n A base Tracker class to be used for all logging integration implementations.\n \"\"\"\n \n+ @abstractproperty\n+ def requires_logging_directory(self):\n+ \"\"\"\n+ Whether the logger requires a directory to store their logs. Should either return `True` or `False`. The\n+ default behavior is `False`\n+ \"\"\"\n+ pass", "from_author": false }, { "body": "Much cleaner!", "diff_hunk": "@@ -132,32 +129,13 @@ def __init__(\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n- logging_dir: Optional[Union[str, os.PathLike]] = \"\",\n+ logging_dir: Optional[Union[str, os.PathLike]] = None,\n dispatch_batches: Optional[bool] = None,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n- loggers = []\n- if log_with is not None:\n- if not isinstance(log_with, (list, tuple)):\n- log_with = [log_with]\n- logger.debug(f\"{log_with}\")\n- if \"all\" in log_with or LoggerType.ALL in log_with:\n- loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n- else:\n- for log_type in log_with:\n- if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):\n- raise ValueError(\n- f\"Unsupported logging capability: {log_type}. 
Choose between {LoggerType.list()}\"\n- )\n- if issubclass(type(log_type), GeneralTracker):\n- loggers.append(log_type)\n- else:\n- log_type = LoggerType(log_type)\n- if log_type not in loggers:\n- loggers.append(log_type)\n- self.log_with = loggers\n self.logging_dir = logging_dir\n+ self.log_with = filter_trackers(log_with, self.logging_dir)", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "@sgugger pinging for a rereview just to make sure the refactor seems sound to you πŸ˜„ ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/316", "pr_id": 910332479 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex e18686e12..514b3a4f4 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -440,7 +440,10 @@ def _prepare_deepspeed(self, *args):\n batch_size_per_device * deepspeed_plugin.gradient_accumulation_steps * self.num_processes\n )\n \n- result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+ result = [\n+ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj\n+ for obj in args\n+ ]\n \n model = None\n optimizer = None\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/315", "pr_id": 908519146 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex c213a9167..5020c6cc4 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -14,4 +14,17 @@ jobs:\n - name: Install Python dependencies\n run: pip install setuptools==59.5.0; pip install -e .[test,test_trackers]\n - name: Run Tests\n- run: make test\n\\ No newline at end of file\n+ run: make test\n+ \n+ test_examples:\n+ runs-on: ubuntu-latest\n+ steps:\n+ - uses: actions/checkout@v2\n+ - name: Set up Python 3.6\n+ uses: actions/setup-python@v2\n+ with:\n+ python-version: 3.6\n+ - name: Install Python dependencies\n+ run: pip install setuptools==59.5.0; pip install -e .[test] tensorboard\n+ - name: Run Tests\n+ run: make test_examples\n\\ No newline at end of file\ndiff --git a/Makefile b/Makefile\nindex e5b557f10..db7893565 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -25,4 +25,7 @@ style:\n \t\n # Run tests for the library\n test:\n-\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/\n\\ No newline at end of file\n+\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/ --ignore=./tests/test_examples.py\n+\n+test_examples:\n+\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/test_examples.py\ndiff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nindex 35205107d..e595a8db7 100644\n--- a/examples/by_feature/checkpointing.py\n+++ b/examples/by_feature/checkpointing.py\n@@ -115,14 +115,17 @@ def training_function(config, args):\n \n # New Code #\n # Parse out whether we are saving every epoch or after a certain number of batches\n- if args.checkpointing_steps == \"epoch\":\n- checkpointing_steps = args.checkpointing_steps\n- elif args.checkpointing_steps.isdigit():\n- checkpointing_steps = int(args.checkpointing_steps)\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = 
args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n+ )\n else:\n- raise ValueError(\n- f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n- )\n+ checkpointing_steps = None\n \n set_seed(seed)\n \n@@ -162,21 +165,29 @@ def training_function(config, args):\n \n # New Code #\n # We need to keep track of how many total steps we have iterated over\n- if isinstance(checkpointing_steps, int):\n- overall_step = 0\n+ overall_step = 0\n \n # We need to load the checkpoint back in before training here with `load_state`\n # The total number of epochs is adjusted based on where the state is being loaded from,\n # as we assume continuation of the same training script\n if args.resume_from_checkpoint:\n- accelerator.print(f\"Resuming from checkpoint: {args.resume_from_checkpoint}\")\n- accelerator.load_state(args.resume_from_checkpoint)\n-\n- if \"epoch\" in args.resume_from_checkpoint:\n- num_epochs -= int(args.resume_from_checkpoint.replace(\"epoch_\", \"\"))\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ path = os.path.basename(args.resume_from_checkpoint)\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ # Extract `epoch_{i}` or `step_{i}`\n+ training_difference = os.path.splitext(path)[0]\n+\n+ if \"epoch\" in training_difference:\n+ num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n resume_step = None\n else:\n- resume_step = int(args.resume_from_checkpoint.replace(\"step_\", \"\"))\n+ resume_step = int(training_difference.replace(\"step_\", \"\"))\n num_epochs -= resume_step // len(train_dataloader)\n # If resuming by step, we also need to know exactly how far into the DataLoader we went\n resume_step = (num_epochs * len(train_dataloader)) - resume_step\n@@ -200,6 +211,8 @@ def training_function(config, args):\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n+ # New Code #\n+ overall_step += 1\n \n # New Code #\n # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state`\n@@ -237,7 +250,7 @@ def training_function(config, args):\n # Will contain files: \"pytorch_model.bin\", \"optimizer.bin\", \"scheduler.bin\", and \"random_states.pkl\"\n # If mixed precision was used, will also save a \"scalar.bin\" file\n if checkpointing_steps == \"epoch\":\n- output_dir = f\"epoch_{num_epochs}\"\n+ output_dir = f\"epoch_{epoch}\"\n if args.output_dir is not None:\n output_dir = os.path.join(args.output_dir, output_dir)\n accelerator.save_state(output_dir)\n@@ -258,7 +271,7 @@ def main():\n parser.add_argument(\n \"--checkpointing_steps\",\n type=str,\n- default=\"epoch\",\n+ default=None,\n help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n )\n parser.add_argument(\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nindex ca11fa04b..170211e5e 100644\n--- a/examples/by_feature/tracking.py\n+++ b/examples/by_feature/tracking.py\n@@ -13,6 
+13,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n import argparse\n+import logging\n+import os\n \n import torch\n from torch.utils.data import DataLoader\n@@ -28,6 +30,9 @@\n )\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n ########################################################################\n # This is a fully working simple example to use Accelerate,\n # specifically showcasing the experiment tracking capability,\n@@ -110,7 +115,9 @@ def training_function(config, args):\n # Note: If using a custom `Tracker` class, should be passed in here such as:\n # >>> log_with = [\"all\", MyCustomTrackerClassInstance()]\n if args.with_tracking:\n- accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\", logging_dir=args.logging_dir\n+ )\n else:\n accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n@@ -158,7 +165,10 @@ def training_function(config, args):\n # New Code #\n # We need to initalize the trackers we use. Overall configurations can also be stored\n if args.with_tracking:\n- accelerator.init_trackers(\"accelerate_glue_with_tracking\", config)\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n # Now we train the model\n for epoch in range(num_epochs):\n@@ -173,7 +183,8 @@ def training_function(config, args):\n outputs = model(**batch)\n loss = outputs.loss\n # New Code #\n- total_loss += loss.detach().float()\n+ if args.with_tracking:\n+ total_loss += loss.detach().float()\n loss = loss / gradient_accumulation_steps\n accelerator.backward(loss)\n if step % gradient_accumulation_steps == 0:\n@@ -202,14 +213,21 @@ def training_function(config, args):\n # New Code #\n # To actually log, we call `Accelerator.log`\n # The values passed can be of `str`, `int`, or `float`\n- accelerator.log(\n- {\"accuracy\": eval_metric[\"accuracy\"], \"f1\": eval_metric[\"f1\"], \"train_loss\": total_loss, \"epoch\": epoch}\n- )\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"accuracy\": eval_metric[\"accuracy\"],\n+ \"f1\": eval_metric[\"f1\"],\n+ \"train_loss\": total_loss,\n+ \"epoch\": epoch,\n+ }\n+ )\n \n # New Code #\n # When a run is finished, you should call `accelerator.end_training()`\n # to close all of the open trackers\n- accelerator.end_training()\n+ if args.with_tracking:\n+ accelerator.end_training()\n \n \n def main():\n@@ -229,6 +247,12 @@ def main():\n action=\"store_true\",\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n )\n+ parser.add_argument(\n+ \"--logging_dir\",\n+ type=str,\n+ default=\"logs\",\n+ help=\"Location on where to store experiment tracking logs`\",\n+ )\n args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nindex e62568a39..01033498c 100644\n--- a/examples/complete_cv_example.py\n+++ b/examples/complete_cv_example.py\n@@ -74,16 +74,12 @@ def __getitem__(self, idx):\n def training_function(config, args):\n # Initialize accelerator\n if args.with_tracking:\n- accelerator = 
Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\", logging_dir=args.logging_dir\n+ )\n else:\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n \n- if hasattr(args.checkpointing_steps, \"isdigit\"):\n- checkpointing_steps = args.checkpointing_steps\n- if args.checkpointing_steps.isdigit():\n- checkpointing_steps = int(args.checkpointing_steps)\n- else:\n- checkpointing_steps = None\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n@@ -93,9 +89,25 @@ def training_function(config, args):\n if not isinstance(image_size, (list, tuple)):\n image_size = (image_size, image_size)\n \n+ # Parse out whether we are saving every epoch or after a certain number of batches\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n+ )\n+ else:\n+ checkpointing_steps = None\n+\n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n- accelerator.init_trackers(\"cv_example\", config)\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n # Grab all the image filenames\n file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(\".jpg\")]\n@@ -163,24 +175,27 @@ def training_function(config, args):\n )\n \n # Potentially load in the weights and states from a previous save\n- state_restored = True\n if args.resume_from_checkpoint:\n if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n accelerator.load_state(args.resume_from_checkpoint)\n- resume_step = None\n+ path = os.path.basename(args.resume_from_checkpoint)\n else:\n # Get the most recent checkpoint\n dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n dirs.sort(key=os.path.getctime)\n path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n- if \"epoch\" in path.name:\n- num_epochs -= int(path.name.replace(\"epoch_\", \"\"))\n- else:\n- resume_step = int(path.name.replace(\"step_\", \"\"))\n- num_epochs -= resume_step // len(train_dataloader)\n- resume_step = (num_epochs * len(train_dataloader)) - resume_step\n- state_restored = False\n+ # Extract `epoch_{i}` or `step_{i}`\n+ training_difference = os.path.splitext(path)[0]\n+\n+ if \"epoch\" in training_difference:\n+ num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n+ resume_step = None\n+ else:\n+ resume_step = int(training_difference.replace(\"step_\", \"\"))\n+ num_epochs -= resume_step // len(train_dataloader)\n+ # If resuming by step, we also need to know exactly how far into the DataLoader we went\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n \n overall_step = 0\n # Now we train the model\n@@ 
-190,8 +205,9 @@ def training_function(config, args):\n total_loss = 0\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n- if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\n- continue\n+ if args.resume_from_checkpoint and epoch == 0:\n+ if resume_step is not None and step < resume_step:\n+ pass\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n inputs = (batch[\"image\"] - mean) / std\n@@ -206,32 +222,40 @@ def training_function(config, args):\n optimizer.zero_grad()\n overall_step += 1\n if isinstance(checkpointing_steps, int):\n+ output_dir = f\"step_{overall_step}\"\n if overall_step % checkpointing_steps == 0:\n- accelerator.save_state(f\"step_{overall_step}\")\n- if state_restored:\n- model.eval()\n- accurate = 0\n- num_elems = 0\n- for step, batch in enumerate(eval_dataloader):\n- # We could avoid this line since we set the accelerator with `device_placement=True`.\n- batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n- inputs = (batch[\"image\"] - mean) / std\n- with torch.no_grad():\n- outputs = model(inputs)\n- predictions = outputs.argmax(dim=-1)\n- accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n- num_elems += accurate_preds.shape[0]\n- accurate += accurate_preds.long().sum()\n-\n- eval_metric = accurate.item() / num_elems\n- # Use accelerator.print to print only on the main process.\n- accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n- if args.with_tracking:\n- accelerator.log(\n- {\"accuracy\": 100 * eval_metric, \"total_loss\": total_loss, \"epoch\": epoch}, step=overall_step\n- )\n- if args.checkpointing_steps == \"epoch\":\n- accelerator.save_state(f\"epoch_{epoch}\")\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+ model.eval()\n+ accurate = 0\n+ num_elems = 0\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n+ inputs = (batch[\"image\"] - mean) / std\n+ with torch.no_grad():\n+ outputs = model(inputs)\n+ predictions = outputs.argmax(dim=-1)\n+ accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n+ num_elems += accurate_preds.shape[0]\n+ accurate += accurate_preds.long().sum()\n+\n+ eval_metric = accurate.item() / num_elems\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\"accuracy\": 100 * eval_metric, \"total_loss\": total_loss, \"epoch\": epoch}, step=overall_step\n+ )\n+ if checkpointing_steps == \"epoch\":\n+ output_dir = f\"epoch_{epoch}\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+\n+ if args.with_tracking:\n+ accelerator.end_training()\n \n \n def main():\n@@ -254,6 +278,12 @@ def main():\n default=None,\n help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.\",\n+ )\n parser.add_argument(\n \"--resume_from_checkpoint\",\n type=str,\n@@ -262,9 +292,15 @@ def main():\n )\n parser.add_argument(\n \"--with_tracking\",\n- required=False,\n+ action=\"store_true\",\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n )\n+ parser.add_argument(\n+ \"--logging_dir\",\n+ type=str,\n+ default=\"logs\",\n+ help=\"Location on where to store experiment tracking logs`\",\n+ )\n args = parser.parse_args()\n config = {\"lr\": 3e-2, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 64, \"image_size\": 224}\n training_function(config, args)\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nindex 57125aa29..a0e8d568c 100644\n--- a/examples/complete_nlp_example.py\n+++ b/examples/complete_nlp_example.py\n@@ -55,14 +55,21 @@\n def training_function(config, args):\n # Initialize accelerator\n if args.with_tracking:\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ accelerator = Accelerator(\n+ cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\", logging_dir=args.logging_dir\n+ )\n else:\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n \n if hasattr(args.checkpointing_steps, \"isdigit\"):\n- checkpointing_steps = args.checkpointing_steps\n- if args.checkpointing_steps.isdigit():\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. 
`{args.checkpointing_steps}` passed.\"\n+ )\n else:\n checkpointing_steps = None\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n@@ -74,7 +81,10 @@ def training_function(config, args):\n \n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n- accelerator.init_trackers(\"nlp_example\", config)\n+ run = os.path.split(__file__)[-1].split(\".\")[0]\n+ if args.logging_dir:\n+ run = os.path.join(args.logging_dir, run)\n+ accelerator.init_trackers(run, config)\n \n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n datasets = load_dataset(\"glue\", \"mrpc\")\n@@ -143,27 +153,31 @@ def collate_fn(examples):\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n \n+ overall_step = 0\n+\n # Potentially load in the weights and states from a previous save\n- state_restored = True\n if args.resume_from_checkpoint:\n if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n accelerator.load_state(args.resume_from_checkpoint)\n- resume_step = None\n+ path = os.path.basename(args.resume_from_checkpoint)\n else:\n # Get the most recent checkpoint\n dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n dirs.sort(key=os.path.getctime)\n path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n- if \"epoch\" in path.name:\n- num_epochs -= int(path.name.replace(\"epoch_\", \"\"))\n- else:\n- resume_step = int(path.name.replace(\"step_\", \"\"))\n- num_epochs -= resume_step // len(train_dataloader)\n- resume_step = (num_epochs * len(train_dataloader)) - resume_step\n- state_restored = False\n+ # Extract `epoch_{i}` or `step_{i}`\n+ training_difference = os.path.splitext(path)[0]\n+\n+ if \"epoch\" in training_difference:\n+ num_epochs -= int(training_difference.replace(\"epoch_\", \"\"))\n+ resume_step = None\n+ else:\n+ resume_step = int(training_difference.replace(\"step_\", \"\"))\n+ num_epochs -= resume_step // len(train_dataloader)\n+ # If resuming by step, we also need to know exactly how far into the DataLoader we went\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n \n- overall_step = 0\n # Now we train the model\n for epoch in range(num_epochs):\n model.train()\n@@ -171,8 +185,9 @@ def collate_fn(examples):\n total_loss = 0\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n- if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\n- continue\n+ if args.resume_from_checkpoint and epoch == 0:\n+ if resume_step is not None and step < resume_step:\n+ pass\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n outputs = model(**batch)\n@@ -190,42 +205,51 @@ def collate_fn(examples):\n overall_step += 1\n \n if isinstance(checkpointing_steps, int):\n+ output_dir = f\"step_{overall_step}\"\n if overall_step % checkpointing_steps == 0:\n- accelerator.save_state(f\"step_{overall_step}\")\n- if state_restored:\n- model.eval()\n- for step, batch in enumerate(eval_dataloader):\n- # We could avoid this line since we set the accelerator with `device_placement=True`.\n- batch.to(accelerator.device)\n- with torch.no_grad():\n- outputs = model(**batch)\n- predictions = outputs.logits.argmax(dim=-1)\n- metric.add_batch(\n- predictions=accelerator.gather(predictions),\n- 
references=accelerator.gather(batch[\"labels\"]),\n- )\n-\n- eval_metric = metric.compute()\n- # Use accelerator.print to print only on the main process.\n- accelerator.print(f\"epoch {epoch}:\", eval_metric)\n- if args.with_tracking:\n- accelerator.log(\n- {\n- \"accuracy\": eval_metric[\"accuracy\"],\n- \"f1\": eval_metric[\"f1\"],\n- \"total_loss\": total_loss,\n- \"epoch\": epoch,\n- },\n- step=overall_step,\n- )\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"accuracy\": eval_metric[\"accuracy\"],\n+ \"f1\": eval_metric[\"f1\"],\n+ \"train_loss\": total_loss,\n+ \"epoch\": epoch,\n+ }\n+ )\n+\n+ if checkpointing_steps == \"epoch\":\n+ output_dir = f\"epoch_{epoch}\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n \n- if args.checkpointing_steps == \"epoch\":\n- accelerator.save_state(f\"epoch_{epoch}\")\n+ if args.with_tracking:\n+ accelerator.end_training()\n \n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n@@ -250,9 +274,21 @@ def main():\n )\n parser.add_argument(\n \"--with_tracking\",\n- required=False,\n+ action=\"store_true\",\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.\",\n+ )\n+ parser.add_argument(\n+ \"--logging_dir\",\n+ type=str,\n+ default=\"logs\",\n+ help=\"Location on where to store experiment tracking logs`\",\n+ )\n args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex b14b067be..a80b42d13 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -73,7 +73,7 @@ def __getitem__(self, idx):\n \n def training_function(config, args):\n # Initialize accelerator\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mix_precision)\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mix_precision)\n \n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n@@ -185,7 +185,6 @@ def training_function(config, args):\n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n parser.add_argument(\"--data_dir\", required=True, help=\"The data folder on disk.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n@@ -195,6 +194,12 @@ def main():\n \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n \"and an Nvidia Ampere GPU.\",\n )\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n config = {\"lr\": 3e-2, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 64, \"image_size\": 224}\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 915d35807..87cfd7698 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -49,21 +49,19 @@\n EVAL_BATCH_SIZE = 32\n \n \n-def training_function(config, args):\n- # Initialize accelerator\n- accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n- # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n- lr = config[\"lr\"]\n- num_epochs = int(config[\"num_epochs\"])\n- correct_bias = config[\"correct_bias\"]\n- seed = int(config[\"seed\"])\n- batch_size = int(config[\"batch_size\"])\n-\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n datasets = load_dataset(\"glue\", \"mrpc\")\n- metric = load_metric(\"glue\", \"mrpc\")\n-\n- set_seed(seed)\n \n def tokenize_function(examples):\n # max_length=None => use the model max length (it's actually the default)\n@@ -81,12 +79,6 @@ def tokenize_function(examples):\n # transformers library\n tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n \n- # If the batch size is too big we use gradient accumulation\n- gradient_accumulation_steps = 1\n- if batch_size > MAX_GPU_BATCH_SIZE:\n- gradient_accumulation_steps = batch_size // 
MAX_GPU_BATCH_SIZE\n- batch_size = MAX_GPU_BATCH_SIZE\n-\n def collate_fn(examples):\n # On TPU it's best to pad everything to the same length or training will be very slow.\n if accelerator.distributed_type == DistributedType.TPU:\n@@ -101,6 +93,29 @@ def collate_fn(examples):\n tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n )\n \n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ set_seed(seed)\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n \n@@ -148,9 +163,10 @@ def collate_fn(examples):\n with torch.no_grad():\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n metric.add_batch(\n- predictions=accelerator.gather(predictions),\n- references=accelerator.gather(batch[\"labels\"]),\n+ predictions=predictions,\n+ references=references,\n )\n \n eval_metric = metric.compute()\ndiff --git a/setup.py b/setup.py\nindex 0ce1e5af4..7e26759d1 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,12 @@\n extras[\"docs\"] = []\n extras[\"test\"] = [\n \"pytest\",\n- \"pytest-xdist\"\n+ \"pytest-xdist\",\n+ \"pytest-subtests\",\n+ \"datasets\",\n+ \"transformers\",\n+ \"scipy\",\n+ \"sklearn\"\n ]\n extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorflow\"]\n extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\ndiff --git a/src/accelerate/test_utils/examples.py b/src/accelerate/test_utils/examples.py\nnew file mode 100644\nindex 000000000..4e4092c0e\n--- /dev/null\n+++ b/src/accelerate/test_utils/examples.py\n@@ -0,0 +1,139 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\"\"\"\n+A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each\n+`examples/by_feature` example. 
`compare_against_test` is the main function that should be used when testing, while the\n+others are used to either get the code that matters, or to preprocess them (such as stripping comments)\n+\"\"\"\n+\n+import os\n+from typing import List\n+\n+\n+def get_function_contents_by_name(lines: List[str], name: str):\n+ \"\"\"\n+ Extracts a function from `lines` of segmented source code with the name `name`.\n+\n+ Args:\n+ lines (`List[str]`):\n+ Source code of a script seperated by line.\n+ name (`str`):\n+ The name of the function to extract. Should be either `training_function` or `main`\n+ \"\"\"\n+ if name != \"training_function\" and name != \"main\":\n+ raise ValueError(f\"Incorrect function name passed: {name}, choose either 'main' or 'training_function'\")\n+ good_lines, found_start = [], False\n+ for line in lines:\n+ if not found_start and f\"def {name}\" in line:\n+ found_start = True\n+ good_lines.append(line)\n+ continue\n+ if found_start:\n+ if name == \"training_function\" and \"def main\" in line:\n+ return good_lines\n+ if name == \"main\" and \"if __name__\" in line:\n+ return good_lines\n+ good_lines.append(line)\n+\n+\n+def clean_lines(lines: List[str]):\n+ \"\"\"\n+ Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\\n')\n+\n+ Args:\n+ lines (`List[str]`):\n+ Source code of a script seperated by line.\n+ \"\"\"\n+ return [line for line in lines if not line.lstrip().startswith(\"#\") and line != \"\\n\"]\n+\n+\n+def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):\n+ \"\"\"\n+ Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be\n+ used when testing to see if `complete_*_.py` examples have all of the implementations from each of the\n+ `examples/by_feature/*` scripts.\n+\n+ It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code\n+ is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the\n+ `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter.\n+\n+ Args:\n+ base_filename (`str` or `os.PathLike`):\n+ The filepath of a single \"complete\" example script to test, such as `examples/complete_cv_example.py`\n+ feature_filename (`str` or `os.PathLike`):\n+ The filepath of a single feature example script. The contents of this script are checked to see if they\n+ exist in `base_filename`\n+ parser_only (`bool`):\n+ Whether to compare only the `main()` sections in both files, or to compare the contents of\n+ `training_loop()`\n+ secondary_filename (`str`, *optional*):\n+ A potential secondary filepath that should be included in the check. This function extracts the base\n+ functionalities off of \"examples/nlp_example.py\", so if `base_filename` is a script other than\n+ `complete_nlp_example.py`, the template script should be included here. 
Such as `examples/cv_example.py`\n+ \"\"\"\n+ with open(base_filename, \"r\") as f:\n+ base_file_contents = f.readlines()\n+ with open(os.path.abspath(os.path.join(\"examples\", \"nlp_example.py\")), \"r\") as f:\n+ full_file_contents = f.readlines()\n+ with open(feature_filename, \"r\") as f:\n+ feature_file_contents = f.readlines()\n+ if secondary_filename is not None:\n+ with open(secondary_filename, \"r\") as f:\n+ secondary_file_contents = f.readlines()\n+\n+ # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content\n+ if parser_only:\n+ base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, \"main\"))\n+ full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, \"main\"))\n+ feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, \"main\"))\n+ if secondary_filename is not None:\n+ secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, \"main\"))\n+ else:\n+ base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, \"training_function\"))\n+ full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, \"training_function\"))\n+ feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, \"training_function\"))\n+ if secondary_filename is not None:\n+ secondary_file_func = clean_lines(\n+ get_function_contents_by_name(secondary_file_contents, \"training_function\")\n+ )\n+\n+ _dl_line = \"train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\\n\"\n+\n+ # Specific code in our script that differs from the full version, aka what is new\n+ new_feature_code = []\n+ passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement\n+ for i, line in enumerate(feature_file_func):\n+ if i not in passed_idxs:\n+ if (line not in full_file_func) and (line.lstrip() != _dl_line):\n+ new_feature_code.append(line)\n+ passed_idxs.append(i)\n+\n+ # Extract out just the new parts from the full_file_training_func\n+ new_full_example_parts = []\n+ passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement\n+ for i, line in enumerate(base_file_func):\n+ if i not in passed_idxs:\n+ if (line not in full_file_func) and (line.lstrip() != _dl_line):\n+ new_full_example_parts.append(line)\n+ passed_idxs.append(i)\n+\n+ # Finally, get the overall diff\n+ diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts]\n+ if secondary_filename is not None:\n+ diff_from_two = [line for line in full_file_contents if line not in secondary_file_func]\n+ diff_from_example = [line for line in diff_from_example if line not in diff_from_two]\n+\n+ return diff_from_example\ndiff --git a/tests/test_examples.py b/tests/test_examples.py\nnew file mode 100644\nindex 000000000..d60d7c08f\n--- /dev/null\n+++ b/tests/test_examples.py\n@@ -0,0 +1,190 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import tempfile\n+import unittest\n+from unittest import mock\n+\n+from torch.utils.data import DataLoader\n+\n+from accelerate import DistributedType\n+from accelerate.test_utils.examples import compare_against_test\n+from datasets import load_dataset\n+from transformers import AutoTokenizer\n+\n+\n+SRC_DIRS = [os.path.abspath(os.path.join(\"examples\", \"by_feature\"))]\n+sys.path.extend(SRC_DIRS)\n+\n+if SRC_DIRS is not None:\n+ import checkpointing\n+ import tracking\n+\n+# DataLoaders built from `test_samples/MRPC` for quick testing\n+# Should mock `{script_name}.get_dataloaders` via:\n+# @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n+\n+\n+def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n+ datasets = load_dataset(\"csv\", data_files=data_files)\n+ label_list = datasets[\"train\"].unique(\"label\")\n+\n+ label_to_id = {v: i for i, v in enumerate(label_list)}\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(\n+ examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None, padding=\"max_length\"\n+ )\n+ if \"label\" in examples:\n+ outputs[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"sentence1\", \"sentence2\", \"label\"],\n+ )\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=2)\n+ eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=1)\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+class ExampleDifferenceTests(unittest.TestCase):\n+ \"\"\"\n+ This TestCase checks that all of the `complete_*` scripts contain all of the\n+ information found in the `by_feature` scripts, line for line. If one fails,\n+ then a complete example does not contain all of the features in the features\n+ scripts, and should be updated.\n+\n+ Each example script should be a single test (such as `test_nlp_example`),\n+ and should run `one_complete_example` twice: once with `parser_only=True`,\n+ and the other with `parser_only=False`. 
This is so that when the test\n+ failures are returned to the user, they understand if the discrepancy lies in\n+ the `main` function, or the `training_loop` function. Otherwise it will be\n+ unclear.\n+\n+ Also, if there are any expected differences between the base script used and\n+ `complete_nlp_example.py` (the canonical base script), these should be included in\n+ `special_strings`. These would be differences in how something is logged, print statements,\n+ etc (such as calls to `Accelerate.log()`)\n+ \"\"\"\n+\n+ def one_complete_example(\n+ self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None\n+ ):\n+ \"\"\"\n+ Tests a single `complete` example against all of the implemented `by_feature` scripts\n+\n+ Args:\n+ complete_file_name (`str`):\n+ The filename of a complete example\n+ parser_only (`bool`):\n+ Whether to look at the main training function, or the argument parser\n+ secondary_filename (`str`, *optional*):\n+ A potential secondary base file to strip all script information not relevant for checking,\n+ such as \"cv_example.py\" when testing \"complete_cv_example.py\"\n+ special_strings (`list`, *optional*):\n+ A list of strings to potentially remove before checking no differences are left. These should be\n+ diffs that are file specific, such as different logging variations between files.\n+ \"\"\"\n+ self.maxDiff = None\n+ by_feature_path = os.path.abspath(os.path.join(\"examples\", \"by_feature\"))\n+ examples_path = os.path.abspath(\"examples\")\n+ for item in os.listdir(by_feature_path):\n+ item_path = os.path.join(by_feature_path, item)\n+ if os.path.isfile(item_path) and \".py\" in item_path:\n+ with self.subTest(\n+ tested_script=complete_file_name,\n+ feature_script=item,\n+ tested_section=\"main()\" if parser_only else \"training_function()\",\n+ ):\n+ diff = compare_against_test(\n+ os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename\n+ )\n+ diff = \"\\n\".join(diff)\n+ if special_strings is not None:\n+ for string in special_strings:\n+ diff = diff.replace(string, \"\")\n+ self.assertEqual(diff, \"\")\n+\n+ def test_nlp_examples(self):\n+ self.one_complete_example(\"complete_nlp_example.py\", True)\n+ self.one_complete_example(\"complete_nlp_example.py\", False)\n+\n+ def test_cv_examples(self):\n+ cv_path = os.path.abspath(os.path.join(\"examples\", \"cv_example.py\"))\n+ special_strings = [\n+ \" \" * 16 + \"{\\n\\n\",\n+ \" \" * 18 + '\"accuracy\": eval_metric[\"accuracy\"],\\n\\n',\n+ \" \" * 18 + '\"f1\": eval_metric[\"f1\"],\\n\\n',\n+ \" \" * 18 + '\"train_loss\": total_loss,\\n\\n',\n+ \" \" * 18 + '\"epoch\": epoch,\\n\\n',\n+ \" \" * 16 + \"}\\n\",\n+ \" \" * 8,\n+ ]\n+ self.one_complete_example(\"complete_cv_example.py\", True, cv_path, special_strings)\n+ self.one_complete_example(\"complete_cv_example.py\", False, cv_path, special_strings)\n+\n+\n+class FeatureExamplesTests(unittest.TestCase):\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_checkpointing_by_epoch(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps epoch\n+ --output_dir {tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(tmpdir, \"epoch_0\")))\n+\n+ @mock.patch(\"checkpointing.get_dataloaders\", mocked_dataloaders)\n+ def test_checkpointing_by_steps(self):\n+ with 
tempfile.TemporaryDirectory() as tmpdir:\n+ testargs = f\"\"\"\n+ checkpointing.py\n+ --checkpointing_steps 2\n+ --output_dir {tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ checkpointing.main()\n+ self.assertTrue(os.path.exists(os.path.join(tmpdir, \"step_2\")))\n+\n+ @mock.patch(\"tracking.get_dataloaders\", mocked_dataloaders)\n+ def test_tracking(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ testargs = f\"\"\"\n+ tracking.py\n+ --with_tracking\n+ --logging_dir {tmpdir}\n+ \"\"\".split()\n+ with mock.patch.object(sys, \"argv\", testargs):\n+ tracking.main()\n+ self.assertTrue(os.path.exists(os.path.join(tmpdir, \"tracking\")))\ndiff --git a/tests/test_samples/MRPC/dev.csv b/tests/test_samples/MRPC/dev.csv\nnew file mode 100644\nindex 000000000..96beccda9\n--- /dev/null\n+++ b/tests/test_samples/MRPC/dev.csv\n@@ -0,0 +1,7 @@\n+label,sentence1,sentence2\n+equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,\"\"\" The foodservice pie business does not fit our long-term growth strategy .\"\n+not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,\"His wife said he was \"\" 100 percent behind George Bush \"\" and looked forward to using his years of training in the war .\"\n+not_equivalent,\"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .\",\"The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .\"\n+equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .\n+not_equivalent,No dates have been set for the civil or the criminal trial .,\"No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty .\"\n+equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .\ndiff --git a/tests/test_samples/MRPC/train.csv b/tests/test_samples/MRPC/train.csv\nnew file mode 100644\nindex 000000000..96beccda9\n--- /dev/null\n+++ b/tests/test_samples/MRPC/train.csv\n@@ -0,0 +1,7 @@\n+label,sentence1,sentence2\n+equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,\"\"\" The foodservice pie business does not fit our long-term growth strategy .\"\n+not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,\"His wife said he was \"\" 100 percent behind George Bush \"\" and looked forward to using his years of training in the war .\"\n+not_equivalent,\"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .\",\"The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .\"\n+equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .\n+not_equivalent,No dates have been set for the civil or the criminal trial .,\"No dates have been set for 
the criminal or civil cases , but Shanley has pleaded not guilty .\"\n+equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .\n", "code_comments": [ { "body": "Why not always do this?", "diff_hunk": "@@ -200,6 +200,8 @@ def training_function(config, args):\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n+ if isinstance(checkpointing_steps, int):\n+ overall_step += 1", "from_author": false }, { "body": "It's only needed if we save via checkpointing steps rather than epoch, so didn't want people to assume we *always* need to do that. \r\n\r\n(Which means a comment is needed!)", "diff_hunk": "@@ -200,6 +200,8 @@ def training_function(config, args):\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n+ if isinstance(checkpointing_steps, int):\n+ overall_step += 1", "from_author": true }, { "body": "Yeah I get the variable won't be used if we don't checkpoint with steps, but it doesn't hurt to always have it (and would save one line of code).", "diff_hunk": "@@ -200,6 +200,8 @@ def training_function(config, args):\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n+ if isinstance(checkpointing_steps, int):\n+ overall_step += 1", "from_author": false }, { "body": "```suggestion\r\n# Copyright 2022 The HuggingFace Team. All rights reserved.\r\n```", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.", "from_author": false }, { "body": "A quick intro here would be useful.", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+", "from_author": false }, { "body": "```suggestion\r\ndef get_train_func(lines: list):\r\n```\r\nIf we use type annotations, better use `List[str]` here.", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\n+import logging\n+import os\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_train_func(lines: list):", "from_author": false }, { "body": "Sounds like it could be refactored with the previous one and an extra arg?", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\n+import logging\n+import os\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_train_func(lines: list):\n+ \"\"\"\n+ Finds the main training function from inside segmented source code.\n+\n+ Args:\n+ lines (`list`):\n+ Source code of a script\n+ \"\"\"\n+ good_lines, found_start = [], False\n+ for line in lines:\n+ if not found_start and \"def training_function\" in line:\n+ found_start = True\n+ good_lines.append(line)\n+ continue\n+ if found_start:\n+ if \"def main\" in line:\n+ return good_lines\n+ good_lines.append(line)\n+\n+\n+def get_main_func(lines: list):", "from_author": false }, { "body": "Those do not match the signature. Also this could be expanded a little bit, I'm not sure I fully understand.", "diff_hunk": "@@ -0,0 +1,132 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\n+import logging\n+import os\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_train_func(lines: list):\n+ \"\"\"\n+ Finds the main training function from inside segmented source code.\n+\n+ Args:\n+ lines (`list`):\n+ Source code of a script\n+ \"\"\"\n+ good_lines, found_start = [], False\n+ for line in lines:\n+ if not found_start and \"def training_function\" in line:\n+ found_start = True\n+ good_lines.append(line)\n+ continue\n+ if found_start:\n+ if \"def main\" in line:\n+ return good_lines\n+ good_lines.append(line)\n+\n+\n+def get_main_func(lines: list):\n+ \"\"\"\n+ Finds the main function from inside segmented source code\n+\n+ Args:\n+ lines (`list`):\n+ Source code of a script\n+ \"\"\"\n+ good_lines, found_start = [], False\n+ for line in lines:\n+ if not found_start and \"def main\" in line:\n+ found_start = True\n+ good_lines.append(line)\n+ continue\n+ if found_start:\n+ if \"if __name__\" in line:\n+ return good_lines\n+ good_lines.append(line)\n+\n+\n+def clean_lines(lines: list):\n+ \"\"\"\n+ Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\\n')\n+\n+ Args:\n+ lines (`list`):\n+ Source code of a script\n+ \"\"\"\n+ return [line for line in lines if not line.lstrip().startswith(\"#\") and line != \"\\n\"]\n+\n+\n+def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):\n+ \"\"\"\n+ Checks whether the content aligned in `test_filename` is included inside of `full_filename`.", "from_author": false }, { "body": "```suggestion\r\n# Copyright 2022 The HuggingFace Team. All rights reserved.\r\n```", "diff_hunk": "@@ -0,0 +1,176 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.", "from_author": false }, { "body": "It seems we always use the method with both flags, should we just remove that arg and put the two tests inside?", "diff_hunk": "@@ -0,0 +1,176 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import tempfile\n+import unittest\n+from unittest import mock\n+\n+from torch.utils.data import DataLoader\n+\n+from accelerate import DistributedType\n+from accelerate.test_utils.examples import compare_against_test\n+from datasets import load_dataset\n+from transformers import AutoTokenizer\n+\n+\n+SRC_DIRS = [os.path.abspath(os.path.join(\"examples\", \"by_feature\"))]\n+sys.path.extend(SRC_DIRS)\n+\n+if SRC_DIRS is not None:\n+ import checkpointing\n+ import tracking\n+\n+# DataLoaders built from `test_samples/MRPC` for quick testing\n+# Should mock `{script_name}.get_dataloaders` via:\n+# @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n+\n+\n+def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n+ datasets = load_dataset(\"csv\", data_files=data_files)\n+ label_list = datasets[\"train\"].unique(\"label\")\n+\n+ label_to_id = {v: i for i, v in enumerate(label_list)}\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(\n+ examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None, padding=\"max_length\"\n+ )\n+ if \"label\" in examples:\n+ outputs[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"sentence1\", \"sentence2\", \"label\"],\n+ )\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=2)\n+ eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=1)\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+class ExampleDifferenceTests(unittest.TestCase):\n+ \"\"\"\n+ This TestCase checks that all of the `complete_*` scripts contain all of the\n+ information found in the `by_feature` scripts, line for line.\n+ \"\"\"\n+\n+ def one_complete_example(\n+ self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None\n+ ):\n+ \"\"\"\n+ Tests a single `complete` example against all of the implemented `by_feature` scripts\n+\n+ Args:\n+ complete_file_name (`str`):\n+ The filename of a complete example\n+ 
parser_only (`bool`):\n+ Whether to look at the main training function, or the argument parser\n+ secondary_filename (`str`, *optional*):\n+ A potential secondary base file to strip all script information not relevant for checking,\n+ such as \"cv_example.py\" when testing \"complete_cv_example.py\"\n+ special_strings (`list`, *optional*):\n+ A list of strings to potentially remove before checking no differences are left. These should be\n+ diffs that are file specific, such as different logging variations between files.\n+ \"\"\"\n+ self.maxDiff = None\n+ by_feature_path = os.path.abspath(os.path.join(\"examples\", \"by_feature\"))\n+ examples_path = os.path.abspath(\"examples\")\n+ for item in os.listdir(by_feature_path):\n+ item_path = os.path.join(by_feature_path, item)\n+ if os.path.isfile(item_path) and \".py\" in item_path:\n+ with self.subTest(\n+ tested_script=complete_file_name,\n+ feature_script=item,\n+ tested_section=\"main()\" if parser_only else \"training_function()\",\n+ ):\n+ diff = compare_against_test(\n+ os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename\n+ )\n+ diff = \"\\n\".join(diff)\n+ if special_strings is not None:\n+ for string in special_strings:\n+ diff = diff.replace(string, \"\")\n+ self.assertEqual(diff, \"\")\n+\n+ def test_nlp_examples(self):\n+ self.one_complete_example(\"complete_nlp_example.py\", True)\n+ self.one_complete_example(\"complete_nlp_example.py\", False)\n+", "from_author": false }, { "body": "I'm making a note for this inside of the documentation, but the reasoning for separation is it lets the test failure be more readable as to what section failed, rather than one complete error.\r\nNotice the `tested_section` part\r\nE.g.:\r\n\r\n```bash\r\n_________ ExampleDifferenceTests.test_cv_example (feature_script='tracking.py', tested_script='complete_cv_example.py', tested_section='training_function()') _________\r\n```\r\nvs:\r\n```bash\r\n_________ ExampleDifferenceTests.test_cv_example (feature_script='tracking.py', tested_script='complete_cv_example.py', tested_section='main()') _________\r\n```\r\n\r\n(This is a `pytest-subtest` hack)", "diff_hunk": "@@ -0,0 +1,176 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import tempfile\n+import unittest\n+from unittest import mock\n+\n+from torch.utils.data import DataLoader\n+\n+from accelerate import DistributedType\n+from accelerate.test_utils.examples import compare_against_test\n+from datasets import load_dataset\n+from transformers import AutoTokenizer\n+\n+\n+SRC_DIRS = [os.path.abspath(os.path.join(\"examples\", \"by_feature\"))]\n+sys.path.extend(SRC_DIRS)\n+\n+if SRC_DIRS is not None:\n+ import checkpointing\n+ import tracking\n+\n+# DataLoaders built from `test_samples/MRPC` for quick testing\n+# Should mock `{script_name}.get_dataloaders` via:\n+# @mock.patch(\"{script_name}.get_dataloaders\", mocked_dataloaders)\n+\n+\n+def mocked_dataloaders(accelerator, batch_size: int = 16):\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ data_files = {\"train\": \"tests/test_samples/MRPC/train.csv\", \"validation\": \"tests/test_samples/MRPC/dev.csv\"}\n+ datasets = load_dataset(\"csv\", data_files=data_files)\n+ label_list = datasets[\"train\"].unique(\"label\")\n+\n+ label_to_id = {v: i for i, v in enumerate(label_list)}\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(\n+ examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None, padding=\"max_length\"\n+ )\n+ if \"label\" in examples:\n+ outputs[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"sentence1\", \"sentence2\", \"label\"],\n+ )\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=2)\n+ eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=1)\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+class ExampleDifferenceTests(unittest.TestCase):\n+ \"\"\"\n+ This TestCase checks that all of the `complete_*` scripts contain all of the\n+ information found in the `by_feature` scripts, line for line.\n+ \"\"\"\n+\n+ def one_complete_example(\n+ self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None\n+ ):\n+ \"\"\"\n+ Tests a single `complete` example against all of the implemented `by_feature` scripts\n+\n+ Args:\n+ complete_file_name (`str`):\n+ The filename of a complete example\n+ 
parser_only (`bool`):\n+ Whether to look at the main training function, or the argument parser\n+ secondary_filename (`str`, *optional*):\n+ A potential secondary base file to strip all script information not relevant for checking,\n+ such as \"cv_example.py\" when testing \"complete_cv_example.py\"\n+ special_strings (`list`, *optional*):\n+ A list of strings to potentially remove before checking no differences are left. These should be\n+ diffs that are file specific, such as different logging variations between files.\n+ \"\"\"\n+ self.maxDiff = None\n+ by_feature_path = os.path.abspath(os.path.join(\"examples\", \"by_feature\"))\n+ examples_path = os.path.abspath(\"examples\")\n+ for item in os.listdir(by_feature_path):\n+ item_path = os.path.join(by_feature_path, item)\n+ if os.path.isfile(item_path) and \".py\" in item_path:\n+ with self.subTest(\n+ tested_script=complete_file_name,\n+ feature_script=item,\n+ tested_section=\"main()\" if parser_only else \"training_function()\",\n+ ):\n+ diff = compare_against_test(\n+ os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename\n+ )\n+ diff = \"\\n\".join(diff)\n+ if special_strings is not None:\n+ for string in special_strings:\n+ diff = diff.replace(string, \"\")\n+ self.assertEqual(diff, \"\")\n+\n+ def test_nlp_examples(self):\n+ self.one_complete_example(\"complete_nlp_example.py\", True)\n+ self.one_complete_example(\"complete_nlp_example.py\", False)\n+", "from_author": true }, { "body": "Should go inside triple quotes.", "diff_hunk": "@@ -0,0 +1,143 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each\n+# `examples/by_feature` example. `compare_against_test` is the main function that should be used when testing,\n+# while the others are used to either get the code that matters, or to preprocess them (such as stripping comments)", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "I've come up with a solution to make sure that also the main `complete_` examples will raise an error if they differ from the bits inside of `by_feature`. Now eventually it would be good to have an exclusion list of files for when we *wouldn't* expect to see some feature added (such as say cross validation). But we'll cross that bridge when we get there. 
In the meantime here is the report generated back to the user from pytest:\r\n\r\n(Note: `pytest-subtests` was added as a dep to make sure we could use `TestCase.subTest` and make this code clean and efficient)\r\n```bash\r\n======================================================================= short test summary info =======================================================================\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_cv_example_body - AssertionError: 14 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_cv_example_body - AssertionError: 27 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_cv_example_parser - AssertionError: 1 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_cv_example_parser - AssertionError: 4 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_nlp_example_parser - AssertionError: 1 != 0\r\nSUBFAIL tests/test_examples.py::ExampleDifferenceTests::test_complete_nlp_example_parser - AssertionError: 1 != 0\r\n=============================================================== 6 failed, 4 passed, 1 warning in 1.64s ================================================================\r\n```\r\n\r\n{{ Removed full trace, see the following message for an example }} ", "from_author": true }, { "body": "Slightly tweaked how they look, I've now included it posting the source code diffs. I believe this is a must as it will tell the user exactly what parts were missing from the full example:\r\n\r\n```bash\r\n============================================================================== FAILURES ===============================================================================\r\n_________ ExampleDifferenceTests.test_cv_example (feature_script='tracking.py', tested_script='complete_cv_example.py', tested_section='training_function()') _________\r\n\r\nself = <test_examples.ExampleDifferenceTests testMethod=test_cv_example>, complete_file_name = 'complete_cv_example.py', parser_only = False\r\n\r\n def one_complete_example(self, complete_file_name: str, parser_only: bool):\r\n \"\"\"\r\n Tests a single `complete` example against all of the implemented `by_feature` scripts\r\n \r\n Args:\r\n complete_file_name (`str`):\r\n The filename of a complete example\r\n parser_only (`bool`):\r\n Whether to look at the main training function, or the argument parser\r\n \"\"\"\r\n self.maxDiff = None\r\n by_feature_path = os.path.abspath(os.path.join(\"examples\", \"by_feature\"))\r\n examples_path = os.path.abspath(\"examples\")\r\n for item in os.listdir(by_feature_path):\r\n item_path = os.path.join(by_feature_path, item)\r\n if os.path.isfile(item_path) and \".py\" in item_path:\r\n with self.subTest(tested_script=complete_file_name, feature_script=item, tested_section=\"main()\" if parser_only else \"training_function()\"):\r\n diff = compare_against_test(\r\n os.path.join(examples_path, \"nlp_example.py\"),\r\n os.path.join(examples_path, complete_file_name),\r\n item_path,\r\n parser_only\r\n )\r\n> self.assertEqual('\\n'.join(diff), '')\r\nE AssertionError: ' accelerator = Accelerator(cpu=arg[693 chars]()\\n' != ''\r\nE - accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\r\nE - \r\nE - accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\r\nE - \r\nE - accelerator.init_trackers(\"nlp_example\", config)\r\nE - \r\nE - predictions, references = accelerator.gather((predictions, 
batch[\"labels\"]))\r\nE - \r\nE - predictions=predictions,\r\nE - \r\nE - references=references,\r\nE - \r\nE - accelerator.log(\r\nE - \r\nE - {\r\nE - \r\nE - \"accuracy\": eval_metric[\"accuracy\"],\r\nE - \r\nE - \"f1\": eval_metric[\"f1\"],\r\nE - \r\nE - \"train_loss\": total_loss,\r\nE - \r\nE - \"epoch\": epoch,\r\nE - \r\nE - }\r\nE - \r\nE - accelerator.end_training()\r\n\r\ntests/test_examples.py:107: AssertionError\r\n```", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/313", "pr_id": 906668672 }, { "diff": "diff --git a/examples/README.md b/examples/README.md\nindex 115384a42..bb248e9d1 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -186,10 +186,20 @@ To run it in each of these various modes, use the following commands:\n \n ## Finer Examples\n \n-While the first two scripts are extremely barebones when it comes to what you can do with accelerate, there are two complementary scripts for each: `complete_cv_example.py` and `complete_nlp_example.py`.\n+While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations.\n \n-These two serve to document what else the `Accelerate` library is able to do, outside of just the basic training loop. They each have the following additional arguments:\n+### `by_feature` examples\n+\n+These scripts are *individual* examples highlighting one particular feature or use-case within Accelerate. They all stem from the [nlp_example.py](./nlp_example.py) script, and any changes or modifications is denoted with a `# New Code #` comment.\n+\n+Read the README.md file located in the `by_feature` folder for more information.\n+\n+### `complete_*` examples\n+\n+These two scripts contain *every* single feature currently available in Accelerate in one place, as one giant script.\n+\n+New arguments that can be passed include:\n \n - `checkpointing_steps`, whether the various states should be saved at the end of every `n` steps, or `\"epoch\"` for each epoch. States are then saved to folders named `step_{n}` or `epoch_{n}`\n - `resume_from_checkpoint`, should be used if you want to resume training off of a previous call to the script and passed a `checkpointing_steps` to it.\n-- `with_tracking`, should be used if you want to log the training run using all available experiment trackers in your environment. Currently supported trackers include TensorBoard, Weights and Biases, and CometML.\n\\ No newline at end of file\n+- `with_tracking`, should be used if you want to log the training run using all available experiment trackers in your environment. 
Currently supported trackers include TensorBoard, Weights and Biases, and CometML.\ndiff --git a/examples/by_feature/README.md b/examples/by_feature/README.md\nnew file mode 100644\nindex 000000000..40ffa3c1e\n--- /dev/null\n+++ b/examples/by_feature/README.md\n@@ -0,0 +1,56 @@\n+# What are these scripts?\n+\n+All scripts in this folder originate from the `nlp_example.py` file, as it is a very simplistic NLP training example using Accelerate with zero extra features.\n+\n+From there, each further script adds in just **one** feature of Accelerate, showing how you can quickly modify your own scripts to implement these capabilities.\n+\n+A full example with all of these parts integrated together can be found in the `complete_nlp_example.py` script and `complete_cv_example.py` script.\n+\n+Adjustments to each script from the base `nlp_example.py` file can be found quickly by searching for \"# New Code #\"\n+\n+## Example Scripts by Feature and their Arguments\n+\n+### Base Example (`../nlp_example.py`)\n+\n+- Shows how to use `Accelerator` in an extremely simplistic PyTorch training loop\n+- Arguments available:\n+ - `mixed_precision`, whether to use mixed precision. (\"no\", \"fp16\", or \"bf16\")\n+ - `cpu`, whether to train using only the CPU. (yes/no/1/0)\n+\n+All following scripts also accept these arguments in addition to their added ones.\n+\n+These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:\n+\n+```bash\n+accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0\n+```\n+\n+### Checkpointing and Resuming Training (`checkpointing.py`)\n+\n+- Shows how to use `Accelerator.save_state` and `Accelerator.load_state` to save or continue training\n+- **It is assumed you are continuing off the same training script**\n+- Arguments available:\n+ - `checkpointing_steps`, after how many steps the various states should be saved. (\"epoch\", 1, 2, ...)\n+ - `output_dir`, where saved state folders should be saved to, default is current working directory\n+ - `resume_from_checkpoint`, what checkpoint folder to resume from. (\"epoch_0\", \"step_22\", ...)\n+\n+These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:\n+\n+(Note, `resume_from_checkpoint` assumes that we've ran the script for one epoch with the `--checkpointing_steps epoch` flag)\n+\n+```bash\n+accelerate launch ./checkpointing.py --checkpointing_steps epoch output_dir \"checkpointing_tutorial\" --resume_from_checkpoint \"checkpointing_tutorial/epoch_0\"\n+```\n+\n+### Experiment Tracking (`tracking.py`)\n+\n+- Shows how to use `Accelerate.init_trackers` and `Accelerator.log`\n+- Can be used with Weights and Biases, TensorBoard, or CometML.\n+- Arguments available:\n+ - `with_tracking`, whether to load in all available experiment trackers from the environment.\n+\n+These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:\n+\n+```bash\n+accelerate launch ./tracking.py --with_tracking\n+```\ndiff --git a/examples/by_feature/checkpointing.py b/examples/by_feature/checkpointing.py\nnew file mode 100644\nindex 000000000..35205107d\n--- /dev/null\n+++ b/examples/by_feature/checkpointing.py\n@@ -0,0 +1,282 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing the checkpointing capability,\n+# and builds off the `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# To help focus on the differences in the code, building `DataLoaders`\n+# was refactored into its own function.\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ 
train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # New Code #\n+ # Parse out whether we are saving every epoch or after a certain number of batches\n+ if args.checkpointing_steps == \"epoch\":\n+ checkpointing_steps = args.checkpointing_steps\n+ elif args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ raise ValueError(\n+ f\"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.\"\n+ )\n+\n+ set_seed(seed)\n+\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # New Code #\n+ # We need to keep track of how many total steps we have iterated over\n+ if isinstance(checkpointing_steps, int):\n+ overall_step = 0\n+\n+ # We need to load the checkpoint back in before training here with `load_state`\n+ # The total number of epochs is adjusted based on where the state is being loaded from,\n+ # as we assume continuation of the same training script\n+ if args.resume_from_checkpoint:\n+ accelerator.print(f\"Resuming from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+\n+ if \"epoch\" in args.resume_from_checkpoint:\n+ num_epochs -= int(args.resume_from_checkpoint.replace(\"epoch_\", \"\"))\n+ 
resume_step = None\n+ else:\n+ resume_step = int(args.resume_from_checkpoint.replace(\"step_\", \"\"))\n+ num_epochs -= resume_step // len(train_dataloader)\n+ # If resuming by step, we also need to know exactly how far into the DataLoader we went\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ # New Code #\n+ # We need to skip steps until we reach the resumed step during the first epoch\n+ if args.resume_from_checkpoint and epoch == 0:\n+ if resume_step is not None and step < resume_step:\n+ pass\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ # New Code #\n+ # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state`\n+ # These are saved to folders named `step_{overall_step}`\n+ # Will contain files: \"pytorch_model.bin\", \"optimizer.bin\", \"scheduler.bin\", and \"random_states.pkl\"\n+ # If mixed precision was used, will also save a \"scalar.bin\" file\n+ if isinstance(checkpointing_steps, int):\n+ output_dir = f\"step_{overall_step}\"\n+ if overall_step % checkpointing_steps == 0:\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True` (the default).\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+\n+ # New Code #\n+ # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state`\n+ # These are saved to folders named `step_{overall_step}`\n+ # Will contain files: \"pytorch_model.bin\", \"optimizer.bin\", \"scheduler.bin\", and \"random_states.pkl\"\n+ # If mixed precision was used, will also save a \"scalar.bin\" file\n+ if checkpointing_steps == \"epoch\":\n+ output_dir = f\"epoch_{num_epochs}\"\n+ if args.output_dir is not None:\n+ output_dir = os.path.join(args.output_dir, output_dir)\n+ accelerator.save_state(output_dir)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). 
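For a quick orientation on the checkpointing flow recorded in `checkpointing.py` above, here is a minimal hedged sketch of how `Accelerator.save_state` and `Accelerator.load_state` fit together; the folder layout and the two-epoch loop are placeholders for illustration, not code from the PR.

```python
import os
from accelerate import Accelerator

accelerator = Accelerator()
# model, optimizer, dataloaders, and scheduler would normally be passed through
# accelerator.prepare(...) before any state is saved.

output_dir = "checkpoints"  # illustrative location, not from the PR
for epoch in range(2):  # illustrative number of epochs
    # ... training steps for this epoch would go here ...
    # Save model/optimizer/scheduler/RNG state under a folder named after the epoch
    accelerator.save_state(os.path.join(output_dir, f"epoch_{epoch}"))

# Later (or in a new run of the same script), restore everything from a chosen folder
accelerator.load_state(os.path.join(output_dir, "epoch_0"))
```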
Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=\"epoch\",\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\".\",\n+ help=\"Optional save directory where all checkpoint folders will be stored. Default is the current working directory.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/examples/by_feature/tracking.py b/examples/by_feature/tracking.py\nnew file mode 100644\nindex 000000000..ca11fa04b\n--- /dev/null\n+++ b/examples/by_feature/tracking.py\n@@ -0,0 +1,238 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate,\n+# specifically showcasing the experiment tracking capability,\n+# and builds off the `nlp_example.py` script.\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# To help focus on the differences in the code, building `DataLoaders`\n+# was refactored into its own function.\n+# New additions from the base script can be found quickly by\n+# looking for the # New Code # tags\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):\n+ \"\"\"\n+ Creates a set of `DataLoader`s for the `glue` dataset,\n+ using \"bert-base-cased\" as the tokenizer.\n+\n+ Args:\n+ accelerator (`Accelerator`):\n+ An `Accelerator` object\n+ batch_size (`int`, *optional*):\n+ The batch size for the train and validation 
DataLoaders.\n+ \"\"\"\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ return train_dataloader, eval_dataloader\n+\n+\n+def training_function(config, args):\n+ # Initialize Accelerator\n+\n+ # New Code #\n+ # We pass in \"all\" to `log_with` to grab all available trackers in the environment\n+ # Note: If using a custom `Tracker` class, should be passed in here such as:\n+ # >>> log_with = [\"all\", MyCustomTrackerClassInstance()]\n+ if args.with_tracking:\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ else:\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+ set_seed(seed)\n+\n+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ 
optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # New Code #\n+ # We need to initalize the trackers we use. Overall configurations can also be stored\n+ if args.with_tracking:\n+ accelerator.init_trackers(\"accelerate_glue_with_tracking\", config)\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ # New Code #\n+ # For our tracking example, we will log the total loss of each epoch\n+ if args.with_tracking:\n+ total_loss = 0\n+ for step, batch in enumerate(train_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ # New Code #\n+ total_loss += loss.detach().float()\n+ loss = loss / gradient_accumulation_steps\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True` (the default).\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ # It is slightly faster to call this once, than multiple times\n+ predictions, references = accelerator.gather((predictions, batch[\"labels\"]))\n+ metric.add_batch(\n+ predictions=predictions,\n+ references=references,\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+\n+ # New Code #\n+ # To actually log, we call `Accelerator.log`\n+ # The values passed can be of `str`, `int`, or `float`\n+ accelerator.log(\n+ {\"accuracy\": eval_metric[\"accuracy\"], \"f1\": eval_metric[\"f1\"], \"train_loss\": total_loss, \"epoch\": epoch}\n+ )\n+\n+ # New Code #\n+ # When a run is finished, you should call `accelerator.end_training()`\n+ # to close all of the open trackers\n+ accelerator.end_training()\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). 
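Condensed from the tracking script above, this is a rough sketch of the logging lifecycle (`log_with="all"`, `init_trackers`, `log`, `end_training`); the project name, config, and metric values are made-up placeholders rather than values from the PR.

```python
from accelerate import Accelerator

# "all" picks up every tracker available in the environment (TensorBoard, W&B, CometML, ...)
accelerator = Accelerator(log_with="all")
accelerator.init_trackers("my_project", config={"lr": 2e-5})  # placeholder run name and config

for epoch in range(2):  # placeholder loop standing in for real training/evaluation
    accelerator.log({"train_loss": 0.1, "epoch": epoch}, step=epoch)  # placeholder values

# Close all open trackers once the run is finished
accelerator.end_training()
```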
Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ action=\"store_true\",\n+ help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n+ )\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex a40fe6bef..b14b067be 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -166,7 +166,7 @@ def training_function(config, args):\n model.eval()\n accurate = 0\n num_elems = 0\n- for step, batch in enumerate(eval_dataloader):\n+ for _, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n inputs = (batch[\"image\"] - mean) / std\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 8f0d25541..915d35807 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -51,8 +51,7 @@\n \n def training_function(config, args):\n # Initialize accelerator\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n-\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n@@ -64,6 +63,8 @@ def training_function(config, args):\n datasets = load_dataset(\"glue\", \"mrpc\")\n metric = load_metric(\"glue\", \"mrpc\")\n \n+ set_seed(seed)\n+\n def tokenize_function(examples):\n # max_length=None => use the model max length (it's actually the default)\n outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n@@ -100,8 +101,6 @@ def collate_fn(examples):\n tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n )\n \n- set_seed(seed)\n-\n # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n \n@@ -161,7 +160,6 @@ def collate_fn(examples):\n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n", "code_comments": [ { "body": "Just as a note, I noticed we set_seed *before* building the dataloaders, hence why this change. 
", "diff_hunk": "@@ -64,6 +63,8 @@ def training_function(config, args):\n datasets = load_dataset(\"glue\", \"mrpc\")\n metric = load_metric(\"glue\", \"mrpc\")\n \n+ set_seed(seed)\n+", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "A note on `transformers` examples:\r\n\r\n> This should be decided on a case-by-case basis, as to whether the new feature is important enough to put in those scripts.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/312", "pr_id": 906346536 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 15eaaecb6..d72680bea 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -54,9 +54,10 @@ def get_cluster_input():\n \n if distributed_type == DistributedType.NO:\n use_cpu = _ask_field(\n- \"Do you want to run your training on CPU only (even if a GPU is available)? [no]:\",\n- lambda x: bool(x),\n+ \"Do you want to run your training on CPU only (even if a GPU is available)? [yes/NO]:\",\n+ _convert_yes_no_to_bool,\n default=False,\n+ error_message=\"Please enter yes or no.\",\n )\n elif distributed_type == DistributedType.MULTI_CPU:\n use_cpu = True\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex b958dbbfc..b3974afe2 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -159,10 +159,10 @@ def test_log(self):\n break\n # Check HPS through careful parsing and cleaning\n cleaned_log = re.sub(r\"[\\x00-\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\n- self.assertEqual(self.get_value_from_log(\"total_loss\", cleaned_log), \"0.1\")\n- self.assertEqual(self.get_value_from_log(\"iteration\", cleaned_log), \"1\")\n- self.assertEqual(self.get_value_from_log(\"my_text\", cleaned_log), \"some_value\")\n- self.assertEqual(self.get_value_from_log(\"_step\", cleaned_log), \"0\")\n+ self.assertTrue(\"0.1\" in self.get_value_from_log(\"total_loss\", cleaned_log))\n+ self.assertTrue(\"1\" in self.get_value_from_log(\"iteration\", cleaned_log))\n+ self.assertTrue(\"some_value\" in self.get_value_from_log(\"my_text\", cleaned_log))\n+ self.assertTrue(\"0\" in self.get_value_from_log(\"_step\", cleaned_log))\n \n \n # Comet has a special `OfflineExperiment` we need to use for testing\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/311", "pr_id": 903988203 }, { "diff": "diff --git a/README.md b/README.md\nindex 3e21a3ae1..a3fc74cef 100644\n--- a/README.md\n+++ b/README.md\n@@ -168,7 +168,7 @@ mpirun -np 2 python examples/nlp_example.py\n \n ## Launching training using DeepSpeed\n \n-πŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. to use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your python script, we provide you the `DeepSpeedPlugin`.\n+πŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. 
However, if you desire to tweak your DeepSpeed related args from your python script, we provide you the `DeepSpeedPlugin`.\n \n ```python\n from accelerator import Accelerator, DeepSpeedPlugin\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex 2b405e8ec..1c62f05f5 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -398,10 +398,6 @@ DeepSpeed support is experimental, so the underlying API will evolve in the near\n breaking changes. In particular, πŸ€— Accelerate does not support DeepSpeed config you have written yourself yet, this\n will be added in a next version.\n \n-One main caveat for the DeepSpeed integration is that the DeepSpeed launcher always passes a `local_rank` variable to\n-the training script, so your training script should accept it (whether you launch training with the DeepSpeed launcher\n-or `accelerate launch`).\n-\n <Tip warning={true}>\n \n The [`notebook_launcher`] does not support the DeepSpeed integration yet.\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 0cbd66884..f2cab1ccb 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -233,7 +233,7 @@ def multi_gpu_launcher(args):\n \n \n def deepspeed_launcher(args):\n- cmd = [\"deepspeed\"]\n+ cmd = [\"deepspeed\", \"--no_local_rank\"]\n if args.num_machines > 1:\n cmd.extend(\n [\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/309", "pr_id": 899466884 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 6e0742e02..e18686e12 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -284,9 +284,10 @@ def use_fp16(self):\n @property\n def mixed_precision(self):\n if self.distributed_type == DistributedType.DEEPSPEED:\n- if self.state.deepspeed_plugin.deepspeed_config[\"fp16\"][\"enabled\"]:\n+ config = self.state.deepspeed_plugin.deepspeed_config\n+ if config.get(\"fp16\", {}).get(\"enabled\", False):\n mixed_precision = \"fp16\"\n- elif self.state.deepspeed_plugin.deepspeed_config[\"bf16\"][\"enabled\"]:\n+ elif config.get(\"bf16\", {}).get(\"enabled\", False):\n mixed_precision = \"bf16\"\n else:\n mixed_precision = \"no\"\n", "code_comments": [ { "body": "```suggestion\r\n if config.get(\"fp16\", {}).get(\"enabled\", False):\r\n```", "diff_hunk": "@@ -284,9 +284,10 @@ def use_fp16(self):\n @property\n def mixed_precision(self):\n if self.distributed_type == DistributedType.DEEPSPEED:\n- if self.state.deepspeed_plugin.deepspeed_config[\"fp16\"][\"enabled\"]:\n+ config = self.state.deepspeed_plugin.deepspeed_config\n+ if \"fp16\" in config and config[\"fp16\"].get(\"enabled\", False):", "from_author": false }, { "body": "```suggestion\r\n elif config.get(\"bf16\", {}).get(\"enabled\", False):\r\n```", "diff_hunk": "@@ -284,9 +284,10 @@ def use_fp16(self):\n @property\n def mixed_precision(self):\n if self.distributed_type == DistributedType.DEEPSPEED:\n- if self.state.deepspeed_plugin.deepspeed_config[\"fp16\"][\"enabled\"]:\n+ config = self.state.deepspeed_plugin.deepspeed_config\n+ if \"fp16\" in config and config[\"fp16\"].get(\"enabled\", False):\n mixed_precision = \"fp16\"\n- elif self.state.deepspeed_plugin.deepspeed_config[\"bf16\"][\"enabled\"]:\n+ elif \"bf16\" in config and config[\"bf16\"].get(\"enabled\", False):", "from_author": false } ], 
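The two review suggestions just above replace direct key lookups with chained `dict.get` calls so that a DeepSpeed config without an `fp16` or `bf16` section no longer raises a `KeyError`. A small self-contained illustration of that pattern, with made-up configs:

```python
def detect_mixed_precision(deepspeed_config: dict) -> str:
    # .get(..., {}) returns an empty dict when the section is absent,
    # so the inner .get("enabled", False) safely falls back to False.
    if deepspeed_config.get("fp16", {}).get("enabled", False):
        return "fp16"
    elif deepspeed_config.get("bf16", {}).get("enabled", False):
        return "bf16"
    return "no"

# Made-up configs for illustration
print(detect_mixed_precision({"fp16": {"enabled": True}}))  # fp16
print(detect_mixed_precision({"bf16": {"enabled": True}}))  # bf16
print(detect_mixed_precision({}))                           # no (no KeyError)
```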
"context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/308", "pr_id": 899386739 }, { "diff": "diff --git a/examples/README.md b/examples/README.md\nindex 19995fc0a..115384a42 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -183,3 +183,13 @@ To run it in each of these various modes, use the following commands:\n ```\n * In PyTorch:\n Add an `xmp.spawn` line in your script as you usually do.\n+\n+## Finer Examples\n+\n+While the first two scripts are extremely barebones when it comes to what you can do with accelerate, there are two complementary scripts for each: `complete_cv_example.py` and `complete_nlp_example.py`.\n+\n+These two serve to document what else the `Accelerate` library is able to do, outside of just the basic training loop. They each have the following additional arguments:\n+\n+- `checkpointing_steps`, whether the various states should be saved at the end of every `n` steps, or `\"epoch\"` for each epoch. States are then saved to folders named `step_{n}` or `epoch_{n}`\n+- `resume_from_checkpoint`, should be used if you want to resume training off of a previous call to the script and passed a `checkpointing_steps` to it.\n+- `with_tracking`, should be used if you want to log the training run using all available experiment trackers in your environment. Currently supported trackers include TensorBoard, Weights and Biases, and CometML.\n\\ No newline at end of file\ndiff --git a/examples/complete_cv_example.py b/examples/complete_cv_example.py\nnew file mode 100644\nindex 000000000..e62568a39\n--- /dev/null\n+++ b/examples/complete_cv_example.py\n@@ -0,0 +1,274 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+import re\n+\n+import numpy as np\n+import torch\n+from torch.optim.lr_scheduler import OneCycleLR\n+from torch.utils.data import DataLoader, Dataset\n+\n+import PIL\n+from accelerate import Accelerator\n+from timm import create_model\n+from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a ResNet50 on the Oxford-IIT Pet Dataset\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+# Function to get the label from the filename\n+def extract_label(fname):\n+ stem = fname.split(os.path.sep)[-1]\n+ return re.search(r\"^(.*)_\\d+\\.jpg$\", stem).groups()[0]\n+\n+\n+class PetsDataset(Dataset):\n+ def __init__(self, file_names, image_transform=None, label_to_id=None):\n+ self.file_names = file_names\n+ self.image_transform = image_transform\n+ self.label_to_id = label_to_id\n+\n+ def __len__(self):\n+ return len(self.file_names)\n+\n+ def __getitem__(self, idx):\n+ fname = self.file_names[idx]\n+ raw_image = PIL.Image.open(fname)\n+ image = raw_image.convert(\"RGB\")\n+ if self.image_transform is not None:\n+ image = self.image_transform(image)\n+ label = extract_label(fname)\n+ if self.label_to_id is not None:\n+ label = self.label_to_id[label]\n+ return {\"image\": image, \"label\": label}\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ else:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ checkpointing_steps = args.checkpointing_steps\n+ if args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+ image_size = config[\"image_size\"]\n+ if not isinstance(image_size, (list, tuple)):\n+ image_size = (image_size, image_size)\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ accelerator.init_trackers(\"cv_example\", config)\n+\n+ # Grab all the 
image filenames\n+ file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(\".jpg\")]\n+\n+ # Build the label correspondences\n+ all_labels = [extract_label(fname) for fname in file_names]\n+ id_to_label = list(set(all_labels))\n+ id_to_label.sort()\n+ label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}\n+\n+ # Set the seed before splitting the data.\n+ np.random.seed(seed)\n+ torch.manual_seed(seed)\n+ torch.cuda.manual_seed_all(seed)\n+\n+ # Split our filenames between train and validation\n+ random_perm = np.random.permutation(len(file_names))\n+ cut = int(0.8 * len(file_names))\n+ train_split = random_perm[:cut]\n+ eval_split = random_perm[cut:]\n+\n+ # For training we use a simple RandomResizedCrop\n+ train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])\n+ train_dataset = PetsDataset(\n+ [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id\n+ )\n+\n+ # For evaluation, we use a deterministic Resize\n+ eval_tfm = Compose([Resize(image_size), ToTensor()])\n+ eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = create_model(\"resnet50d\", pretrained=True, num_classes=len(label_to_id))\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Freezing the base model\n+ for param in model.parameters():\n+ param.requires_grad = False\n+ for param in model.get_classifier().parameters():\n+ param.requires_grad = True\n+\n+ # We normalize the batches of images to be a bit faster.\n+ mean = torch.tensor(model.default_cfg[\"mean\"])[None, :, None, None].to(accelerator.device)\n+ std = torch.tensor(model.default_cfg[\"std\"])[None, :, None, None].to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)\n+\n+ # Instantiate learning rate scheduler\n+ lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Potentially load in the weights and states from a previous save\n+ state_restored = True\n+ if args.resume_from_checkpoint:\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ resume_step = None\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ 
dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ if \"epoch\" in path.name:\n+ num_epochs -= int(path.name.replace(\"epoch_\", \"\"))\n+ else:\n+ resume_step = int(path.name.replace(\"step_\", \"\"))\n+ num_epochs -= resume_step // len(train_dataloader)\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+ state_restored = False\n+\n+ overall_step = 0\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ if args.with_tracking:\n+ total_loss = 0\n+ for step, batch in enumerate(train_dataloader):\n+ # We need to skip steps until we reach the resumed step\n+ if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\n+ continue\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n+ inputs = (batch[\"image\"] - mean) / std\n+ outputs = model(inputs)\n+ loss = torch.nn.functional.cross_entropy(outputs, batch[\"label\"])\n+ # We keep track of the loss at each epoch\n+ if args.with_tracking:\n+ total_loss += loss.detach().float()\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+ overall_step += 1\n+ if isinstance(checkpointing_steps, int):\n+ if overall_step % checkpointing_steps == 0:\n+ accelerator.save_state(f\"step_{overall_step}\")\n+ if state_restored:\n+ model.eval()\n+ accurate = 0\n+ num_elems = 0\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n+ inputs = (batch[\"image\"] - mean) / std\n+ with torch.no_grad():\n+ outputs = model(inputs)\n+ predictions = outputs.argmax(dim=-1)\n+ accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n+ num_elems += accurate_preds.shape[0]\n+ accurate += accurate_preds.long().sum()\n+\n+ eval_metric = accurate.item() / num_elems\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\"accuracy\": 100 * eval_metric, \"total_loss\": total_loss, \"epoch\": epoch}, step=overall_step\n+ )\n+ if args.checkpointing_steps == \"epoch\":\n+ accelerator.save_state(f\"epoch_{epoch}\")\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\"--data_dir\", required=True, help=\"The data folder on disk.\")\n+ parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). 
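As an aside on the resume logic in `complete_cv_example.py` above, locating the most recent `epoch_*`/`step_*` folder can be done by comparing modification times; the helper below is an illustrative sketch with an assumed directory layout, not code from the PR.

```python
import os
from typing import Optional


def latest_checkpoint(root: str = ".") -> Optional[str]:
    """Return the most recently modified `epoch_*` or `step_*` folder under `root`, if any."""
    candidates = [
        entry.path
        for entry in os.scandir(root)
        if entry.is_dir() and (entry.name.startswith("epoch_") or entry.name.startswith("step_"))
    ]
    if not candidates:
        return None
    # Comparing full paths by modification time avoids resolving bare folder names
    # against a working directory that may have changed.
    return max(candidates, key=os.path.getmtime)


print(latest_checkpoint())  # None if no checkpoint folders exist yet
```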
Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ required=False,\n+ help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n+ )\n+ args = parser.parse_args()\n+ config = {\"lr\": 3e-2, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 64, \"image_size\": 224}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/examples/complete_nlp_example.py b/examples/complete_nlp_example.py\nnew file mode 100644\nindex 000000000..57125aa29\n--- /dev/null\n+++ b/examples/complete_nlp_example.py\n@@ -0,0 +1,262 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ else:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ checkpointing_steps = args.checkpointing_steps\n+ if args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning 
rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ accelerator.init_trackers(\"nlp_example\", config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Potentially load in the weights and states from a previous save\n+ state_restored = True\n+ if 
args.resume_from_checkpoint:\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ resume_step = None\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ if \"epoch\" in path.name:\n+ num_epochs -= int(path.name.replace(\"epoch_\", \"\"))\n+ else:\n+ resume_step = int(path.name.replace(\"step_\", \"\"))\n+ num_epochs -= resume_step // len(train_dataloader)\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+ state_restored = False\n+\n+ overall_step = 0\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ if args.with_tracking:\n+ total_loss = 0\n+ for step, batch in enumerate(train_dataloader):\n+ # We need to skip steps until we reach the resumed step\n+ if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\n+ continue\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ outputs = model(**batch)\n+ loss = outputs.loss\n+ loss = loss / gradient_accumulation_steps\n+ # We keep track of the loss at each epoch\n+ if args.with_tracking:\n+ total_loss += loss.detach().float()\n+ accelerator.backward(loss)\n+ if step % gradient_accumulation_steps == 0:\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ overall_step += 1\n+\n+ if isinstance(checkpointing_steps, int):\n+ if overall_step % checkpointing_steps == 0:\n+ accelerator.save_state(f\"step_{overall_step}\")\n+ if state_restored:\n+ model.eval()\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch.to(accelerator.device)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n+ predictions = outputs.logits.argmax(dim=-1)\n+ metric.add_batch(\n+ predictions=accelerator.gather(predictions),\n+ references=accelerator.gather(batch[\"labels\"]),\n+ )\n+\n+ eval_metric = metric.compute()\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}:\", eval_metric)\n+ if args.with_tracking:\n+ accelerator.log(\n+ {\n+ \"accuracy\": eval_metric[\"accuracy\"],\n+ \"f1\": eval_metric[\"f1\"],\n+ \"total_loss\": total_loss,\n+ \"epoch\": epoch,\n+ },\n+ step=overall_step,\n+ )\n+\n+ if args.checkpointing_steps == \"epoch\":\n+ accelerator.save_state(f\"epoch_{epoch}\")\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). 
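The evaluation loop above relies on `accelerator.gather` so that every process contributes its shard of predictions before the metric is computed. A minimal hedged sketch of that pattern with stand-in tensors (on a single process the gather is effectively a no-op):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()

# Stand-ins for one evaluation step's outputs on this process
predictions = torch.tensor([1, 0, 1])
labels = torch.tensor([1, 1, 1])

# gather concatenates the tensors from all processes, so the metric sees the
# full evaluation set instead of only this process's shard
all_predictions, all_labels = accelerator.gather((predictions, labels))
accuracy = (all_predictions == all_labels).float().mean().item()
print(f"accuracy: {accuracy:.3f}")
```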
Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ type=str,\n+ default=None,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\",\n+ type=str,\n+ default=None,\n+ help=\"If the training should continue from a checkpoint folder.\",\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ required=False,\n+ help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n+ )\n+ args = parser.parse_args()\n+ config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex 5f2adb7e4..a40fe6bef 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -149,10 +149,6 @@ def training_function(config, args):\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n \n- # Instantiate learning rate scheduler after preparing the training dataloader as the prepare method\n- # may change its length.\n- lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))\n-\n # Now we train the model\n for epoch in range(num_epochs):\n model.train()\n", "code_comments": [ { "body": "This is the NLP example ;-)", "diff_hunk": "@@ -51,15 +51,28 @@\n \n def training_function(config, args):\n # Initialize accelerator\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n-\n+ if args.with_tracking:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ else:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ checkpointing_steps = args.checkpointing_steps\n+ if args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ checkpointing_steps = None\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n correct_bias = config[\"correct_bias\"]\n seed = int(config[\"seed\"])\n batch_size = int(config[\"batch_size\"])\n \n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ accelerator.init_trackers(\"cv_example\", config)", "from_author": false }, { "body": "```suggestion\r\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\r\n```", "diff_hunk": "@@ -172,6 +214,19 @@ def main():\n \"and an Nvidia Ampere GPU.\",\n )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ required=False,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\", required=False, help=\"If the training should continue from a checkpoint folder.\"\n+ )\n+ parser.add_argument(\n+ \"--with_tracking\",\n+ required=False,\n+ help=\"Whether to load in all available 
experiment trackers from the enviornment and use them for logging.\",", "from_author": false }, { "body": "I'd put `type=str` and `default=None`here instead of `required=False`", "diff_hunk": "@@ -172,6 +214,19 @@ def main():\n \"and an Nvidia Ampere GPU.\",\n )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ required=False,", "from_author": false }, { "body": "I'd put `type=str` and `default=None`here", "diff_hunk": "@@ -172,6 +214,19 @@ def main():\n \"and an Nvidia Ampere GPU.\",\n )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\n+ \"--checkpointing_steps\",\n+ required=False,\n+ help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n+ )\n+ parser.add_argument(\n+ \"--resume_from_checkpoint\", required=False, help=\"If the training should continue from a checkpoint folder.\"", "from_author": false }, { "body": "There is a difference between this argument being None or \"\". In the second case we want to resume from the last checkpoint ideally.", "diff_hunk": "@@ -127,21 +140,38 @@ def collate_fn(examples):\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n \n+ # Potentially load in the weights and states from a previous save\n+ if args.resume_from_checkpoint:", "from_author": false }, { "body": "When resuming a training, we need to skip some steps.", "diff_hunk": "@@ -127,21 +140,38 @@ def collate_fn(examples):\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n \n+ # Potentially load in the weights and states from a previous save\n+ if args.resume_from_checkpoint:\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+\n+ overall_step = 0", "from_author": false }, { "body": "Did you mean `-=` here?", "diff_hunk": "@@ -0,0 +1,262 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ else:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ checkpointing_steps = args.checkpointing_steps\n+ if args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ accelerator.init_trackers(\"nlp_example\", config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = 
tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Potentially load in the weights and states from a previous save\n+ state_restored = True\n+ if args.resume_from_checkpoint:\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ resume_step = None\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ if \"epoch\" in path.name:\n+ num_epochs = -int(path.name.replace(\"epoch_\", \"\"))", "from_author": false }, { "body": "Same here, this is weird.", "diff_hunk": "@@ -0,0 +1,262 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ else:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ checkpointing_steps = args.checkpointing_steps\n+ if args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ accelerator.init_trackers(\"nlp_example\", config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = 
tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Potentially load in the weights and states from a previous save\n+ state_restored = True\n+ if args.resume_from_checkpoint:\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ resume_step = None\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ if \"epoch\" in path.name:\n+ num_epochs = -int(path.name.replace(\"epoch_\", \"\"))\n+ else:\n+ resume_step = int(path.name.replace(\"step_\", \"\"))\n+ num_epochs = -resume_step // len(train_dataloader)", "from_author": false }, { "body": "```suggestion\r\n if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\r\n continue\r\n```\r\nand this needs a comment on what we are doing here.", "diff_hunk": "@@ -0,0 +1,262 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ else:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ checkpointing_steps = args.checkpointing_steps\n+ if args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ accelerator.init_trackers(\"nlp_example\", config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n+ # transformers library\n+ tokenized_datasets = 
tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Potentially load in the weights and states from a previous save\n+ state_restored = True\n+ if args.resume_from_checkpoint:\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ resume_step = None\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ if \"epoch\" in path.name:\n+ num_epochs = -int(path.name.replace(\"epoch_\", \"\"))\n+ else:\n+ resume_step = int(path.name.replace(\"step_\", \"\"))\n+ num_epochs = -resume_step // len(train_dataloader)\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+ state_restored = False\n+\n+ overall_step = 0\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ if args.with_tracking:\n+ total_loss = 0\n+ for step, batch in enumerate(train_dataloader):\n+ if args.resume_from_checkpoint and epoch == 0:\n+ for _ in range(resume_step - 1):\n+ pass", "from_author": false }, { "body": "Then no need for an else branch", "diff_hunk": "@@ -0,0 
+1,262 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+\n+import torch\n+from torch.utils.data import DataLoader\n+\n+from accelerate import Accelerator, DistributedType\n+from datasets import load_dataset, load_metric\n+from transformers import (\n+ AdamW,\n+ AutoModelForSequenceClassification,\n+ AutoTokenizer,\n+ get_linear_schedule_with_warmup,\n+ set_seed,\n+)\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# This example also demonstrates the checkpointing and sharding capabilities\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+MAX_GPU_BATCH_SIZE = 16\n+EVAL_BATCH_SIZE = 32\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ if args.with_tracking:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, log_with=\"all\")\n+ else:\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n+\n+ if hasattr(args.checkpointing_steps, \"isdigit\"):\n+ checkpointing_steps = args.checkpointing_steps\n+ if args.checkpointing_steps.isdigit():\n+ checkpointing_steps = int(args.checkpointing_steps)\n+ else:\n+ checkpointing_steps = None\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = int(config[\"num_epochs\"])\n+ correct_bias = config[\"correct_bias\"]\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+\n+ # We need to initialize the trackers we use, and also store our configuration\n+ if args.with_tracking:\n+ accelerator.init_trackers(\"nlp_example\", config)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n+ datasets = load_dataset(\"glue\", \"mrpc\")\n+ metric = load_metric(\"glue\", \"mrpc\")\n+\n+ def tokenize_function(examples):\n+ # max_length=None => use the model max length (it's actually the default)\n+ outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=None)\n+ return outputs\n+\n+ # Apply the method we just defined to all the examples in all the splits of the dataset\n+ tokenized_datasets = datasets.map(\n+ tokenize_function,\n+ batched=True,\n+ remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n+ )\n+\n+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models 
of the\n+ # transformers library\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n+\n+ # If the batch size is too big we use gradient accumulation\n+ gradient_accumulation_steps = 1\n+ if batch_size > MAX_GPU_BATCH_SIZE:\n+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE\n+ batch_size = MAX_GPU_BATCH_SIZE\n+\n+ def collate_fn(examples):\n+ # On TPU it's best to pad everything to the same length or training will be very slow.\n+ if accelerator.distributed_type == DistributedType.TPU:\n+ return tokenizer.pad(examples, padding=\"max_length\", max_length=128, return_tensors=\"pt\")\n+ return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(\n+ tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size\n+ )\n+ eval_dataloader = DataLoader(\n+ tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE\n+ )\n+\n+ set_seed(seed)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n+\n+ # Instantiate scheduler\n+ lr_scheduler = get_linear_schedule_with_warmup(\n+ optimizer=optimizer,\n+ num_warmup_steps=100,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n+ )\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n+ # Potentially load in the weights and states from a previous save\n+ state_restored = True\n+ if args.resume_from_checkpoint:\n+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n+ accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n+ accelerator.load_state(args.resume_from_checkpoint)\n+ resume_step = None\n+ else:\n+ # Get the most recent checkpoint\n+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n+ dirs.sort(key=os.path.getctime)\n+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n+ if \"epoch\" in path.name:\n+ num_epochs = -int(path.name.replace(\"epoch_\", \"\"))\n+ else:\n+ resume_step = int(path.name.replace(\"step_\", \"\"))\n+ num_epochs = -resume_step // len(train_dataloader)\n+ resume_step = (num_epochs * len(train_dataloader)) - resume_step\n+ state_restored = False\n+\n+ overall_step = 0\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ if args.with_tracking:\n+ total_loss = 0\n+ for step, batch in enumerate(train_dataloader):\n+ if args.resume_from_checkpoint and epoch == 0:\n+ for _ in range(resume_step - 1):\n+ pass\n+ else:", "from_author": false }, { 
"body": "Good catch!", "diff_hunk": "@@ -149,10 +149,6 @@ def training_function(config, args):\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n \n- # Instantiate learning rate scheduler after preparing the training dataloader as the prepare method\n- # may change its length.\n- lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/307", "pr_id": 899272331 }, { "diff": "diff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nindex 65a5caec2..8d496a3d1 100644\n--- a/src/accelerate/tracking.py\n+++ b/src/accelerate/tracking.py\n@@ -243,3 +243,10 @@ def log(self, values: dict, step: Optional[int] = None):\n self.writer.set_step(step)\n self.writer.log_others(values)\n logger.info(\"Successfully logged to CometML\")\n+\n+ def finish(self):\n+ \"\"\"\n+ Closes `comet-ml` writer\n+ \"\"\"\n+ self.writer.end()\n+ logger.info(\"CometML run closed\")\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex 62c423ef7..b958dbbfc 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -13,11 +13,13 @@\n # limitations under the License.\n \n import csv\n+import json\n import logging\n import os\n import re\n import tempfile\n import unittest\n+import zipfile\n from pathlib import Path\n from typing import Optional\n from unittest import mock\n@@ -25,8 +27,9 @@\n # We use TF to parse the logs\n from accelerate import Accelerator\n from accelerate.test_utils.testing import MockingTestCase, TempDirTestCase, require_tensorflow\n-from accelerate.tracking import GeneralTracker\n+from accelerate.tracking import CometMLTracker, GeneralTracker\n from accelerate.utils import is_tensorflow_available\n+from comet_ml import OfflineExperiment\n \n \n if is_tensorflow_available():\n@@ -162,6 +165,66 @@ def test_log(self):\n self.assertEqual(self.get_value_from_log(\"_step\", cleaned_log), \"0\")\n \n \n+# Comet has a special `OfflineExperiment` we need to use for testing\n+def offline_init(self, run_name: str, tmpdir: str):\n+ self.run_name = run_name\n+ self.writer = OfflineExperiment(project_name=run_name, offline_directory=tmpdir)\n+ logger.info(f\"Initialized offline CometML project {self.run_name}\")\n+ logger.info(\"Make sure to log any initial configurations with `self.store_init_configuration` before training!\")\n+\n+\n+@mock.patch.object(CometMLTracker, \"__init__\", offline_init)\n+class CometMLTest(unittest.TestCase):\n+ @staticmethod\n+ def get_value_from_key(log_list, key: str, is_param: bool = False):\n+ \"Extracts `key` from Comet `log`\"\n+ for log in log_list:\n+ j = json.loads(log)[\"payload\"]\n+ if is_param and \"param\" in j.keys():\n+ if j[\"param\"][\"paramName\"] == key:\n+ return j[\"param\"][\"paramValue\"]\n+ if \"log_other\" in j.keys():\n+ if j[\"log_other\"][\"key\"] == key:\n+ return j[\"log_other\"][\"val\"]\n+\n+ def test_init_trackers(self):\n+ with tempfile.TemporaryDirectory() as d:\n+ tracker = CometMLTracker(\"test_project_with_config\", d)\n+ accelerator = Accelerator(log_with=tracker)\n+ config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n+ accelerator.init_trackers(None, config)\n+ accelerator.end_training()\n+ log = os.listdir(d)[0] # Comet is nice, it's just a zip 
file here\n+ # We parse the raw logs\n+ p = os.path.join(d, log)\n+ archive = zipfile.ZipFile(p, \"r\")\n+ log = archive.open(\"messages.json\").read().decode(\"utf-8\")\n+ list_of_json = log.split(\"\\n\")[:-1]\n+ self.assertEqual(self.get_value_from_key(list_of_json, \"num_iterations\", True), 12)\n+ self.assertEqual(self.get_value_from_key(list_of_json, \"learning_rate\", True), 0.01)\n+ self.assertEqual(self.get_value_from_key(list_of_json, \"some_boolean\", True), False)\n+ self.assertEqual(self.get_value_from_key(list_of_json, \"some_string\", True), \"some_value\")\n+\n+ def test_log(self):\n+ with tempfile.TemporaryDirectory() as d:\n+ tracker = CometMLTracker(\"test_project_with_config\", d)\n+ accelerator = Accelerator(log_with=tracker)\n+ accelerator.init_trackers(None)\n+ values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n+ accelerator.log(values, step=0)\n+ accelerator.end_training()\n+ log = os.listdir(d)[0] # Comet is nice, it's just a zip file here\n+ # We parse the raw logs\n+ p = os.path.join(d, log)\n+ archive = zipfile.ZipFile(p, \"r\")\n+ log = archive.open(\"messages.json\").read().decode(\"utf-8\")\n+ list_of_json = log.split(\"\\n\")[:-1]\n+ self.assertEqual(self.get_value_from_key(list_of_json, \"curr_step\", True), 0)\n+ self.assertEqual(self.get_value_from_key(list_of_json, \"total_loss\"), 0.1)\n+ self.assertEqual(self.get_value_from_key(list_of_json, \"iteration\"), 1)\n+ self.assertEqual(self.get_value_from_key(list_of_json, \"my_text\"), \"some_value\")\n+\n+\n class MyCustomTracker(GeneralTracker):\n \"Basic tracker that writes to a csv for testing\"\n _col_names = [\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/306", "pr_id": 896538377 }, { "diff": "diff --git a/docs/source/tracking.mdx b/docs/source/tracking.mdx\nindex 256e2848b..959317887 100644\n--- a/docs/source/tracking.mdx\n+++ b/docs/source/tracking.mdx\n@@ -106,7 +106,7 @@ from typing import Optional\n import wandb\n \n \n-class WandBTracker(GeneralTracker):\n+class MyCustomTracker(GeneralTracker):\n def __init__(self, run_name: str):\n self.run_name = run_name\n wandb.init(self.run_name)\n@@ -118,6 +118,21 @@ class WandBTracker(GeneralTracker):\n wandb.log(values, step=step)\n ```\n \n+When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`~Accelerator.log_with`] to have it automatically\n+be used with the API:\n+\n+```python\n+tracker = MyCustomTracker(\"some_run_name\")\n+accelerator = Accelerate(log_with=tracker)\n+```\n+\n+These also can be mixed with existing trackers, including with `\"all\"`:\n+\n+```python\n+tracker = MyCustomTracker(\"some_run_name\")\n+accelerator = Accelerate(log_with=[tracker, \"all\"])\n+```\n+\n ## When a wrapper cannot work\n \n If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 67c981de3..6e0742e02 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -29,7 +29,7 @@\n from .optimizer import AcceleratedOptimizer\n from .scheduler import AcceleratedScheduler\n from .state import AcceleratorState, DistributedType, is_deepspeed_available\n-from .tracking import CometMLTracker, 
TensorBoardTracker, WandBTracker, get_available_trackers\n+from .tracking import CometMLTracker, GeneralTracker, TensorBoardTracker, WandBTracker, get_available_trackers\n from .utils import (\n DeepSpeedPlugin,\n LoggerType,\n@@ -94,15 +94,15 @@ class Accelerator:\n dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n \n Will default to `[\"torch\"]` for PyTorch versions <=1.5.1 and `[\"generator\"]` for PyTorch versions >= 1.6.\n- log_with (list of `str` or [`~utils.LoggerType`], *optional*):\n+ log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):\n A list of loggers to be setup for experiment tracking. Should be one or several of:\n \n - `\"all\"`\n - `\"tensorboard\"`\n - `\"wandb\"`\n - `\"comet_ml\"`\n-\n- If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them.\n+ If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them. Can also\n+ accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\n A path to a directory for storing logs of locally-compatible loggers.\n dispatch_batches (`bool`, *optional*):\n@@ -131,26 +131,32 @@ def __init__(\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n- log_with: Optional[List[Union[str, LoggerType]]] = None,\n+ log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = \"\",\n dispatch_batches: Optional[bool] = None,\n step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+ loggers = []\n if log_with is not None:\n- if not isinstance(log_with, list):\n+ if not isinstance(log_with, (list, tuple)):\n log_with = [log_with]\n+ logger.debug(f\"{log_with}\")\n if \"all\" in log_with or LoggerType.ALL in log_with:\n- log_with = get_available_trackers()\n+ loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n else:\n- for i, log_type in enumerate(log_with):\n- if log_type not in LoggerType:\n+ for log_type in log_with:\n+ if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):\n raise ValueError(\n f\"Unsupported logging capability: {log_type}. 
Choose between {LoggerType.list()}\"\n )\n- log_with[i] = LoggerType(log_type)\n- log_with = list(set(log_with))\n- self.log_with = log_with\n+ if issubclass(type(log_type), GeneralTracker):\n+ loggers.append(log_type)\n+ else:\n+ log_type = LoggerType(log_type)\n+ if log_type not in loggers:\n+ loggers.append(log_type)\n+ self.log_with = loggers\n self.logging_dir = logging_dir\n \n if mixed_precision is not None:\n@@ -631,7 +637,10 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n \"\"\"\n self.trackers = []\n for tracker in self.log_with:\n- if str(tracker).lower() == \"tensorboard\" and is_tensorboard_available():\n+ if issubclass(type(tracker), GeneralTracker):\n+ # Custom trackers are already initialized\n+ self.trackers.append(tracker)\n+ elif str(tracker).lower() == \"tensorboard\" and is_tensorboard_available():\n self.trackers.append(TensorBoardTracker(project_name, self.logging_dir))\n elif str(tracker).lower() == \"wandb\" and is_wandb_available():\n self.trackers.append(WandBTracker(project_name))\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex b1f66b1ae..62c423ef7 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -12,17 +12,20 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import csv\n import logging\n import os\n import re\n import tempfile\n import unittest\n from pathlib import Path\n+from typing import Optional\n from unittest import mock\n \n # We use TF to parse the logs\n from accelerate import Accelerator\n from accelerate.test_utils.testing import MockingTestCase, TempDirTestCase, require_tensorflow\n+from accelerate.tracking import GeneralTracker\n from accelerate.utils import is_tensorflow_available\n \n \n@@ -157,3 +160,77 @@ def test_log(self):\n self.assertEqual(self.get_value_from_log(\"iteration\", cleaned_log), \"1\")\n self.assertEqual(self.get_value_from_log(\"my_text\", cleaned_log), \"some_value\")\n self.assertEqual(self.get_value_from_log(\"_step\", cleaned_log), \"0\")\n+\n+\n+class MyCustomTracker(GeneralTracker):\n+ \"Basic tracker that writes to a csv for testing\"\n+ _col_names = [\n+ \"total_loss\",\n+ \"iteration\",\n+ \"my_text\",\n+ \"learning_rate\",\n+ \"num_iterations\",\n+ \"some_boolean\",\n+ \"some_string\",\n+ ]\n+\n+ def __init__(self, dir: str):\n+ self.f = open(f\"{dir}/log.csv\", \"w+\")\n+ self.writer = csv.DictWriter(self.f, fieldnames=self._col_names)\n+ self.writer.writeheader()\n+\n+ def store_init_configuration(self, values: dict):\n+ logger.info(\"Call init\")\n+ self.writer.writerow(values)\n+\n+ def log(self, values: dict, step: Optional[int]):\n+ logger.info(\"Call log\")\n+ self.writer.writerow(values)\n+\n+ def finish(self):\n+ self.f.close()\n+\n+\n+class CustomTrackerTestCase(unittest.TestCase):\n+ def test_init_trackers(self):\n+ with tempfile.TemporaryDirectory() as d:\n+ tracker = MyCustomTracker(d)\n+ accelerator = Accelerator(log_with=tracker)\n+ config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n+ accelerator.init_trackers(\"Some name\", config)\n+ accelerator.end_training()\n+ with open(f\"{d}/log.csv\", \"r\") as f:\n+ data = csv.DictReader(f)\n+ data = next(data)\n+ truth = {\n+ \"total_loss\": \"\",\n+ \"iteration\": \"\",\n+ \"my_text\": \"\",\n+ \"learning_rate\": \"0.01\",\n+ \"num_iterations\": \"12\",\n+ \"some_boolean\": \"False\",\n+ \"some_string\": \"some_value\",\n+ }\n+ self.assertDictEqual(data, 
truth)\n+\n+ def test_log(self):\n+ with tempfile.TemporaryDirectory() as d:\n+ tracker = MyCustomTracker(d)\n+ accelerator = Accelerator(log_with=tracker)\n+ accelerator.init_trackers(\"Some name\")\n+ values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n+ accelerator.log(values, step=0)\n+ accelerator.end_training()\n+ with open(f\"{d}/log.csv\", \"r\") as f:\n+ data = csv.DictReader(f)\n+ data = next(data)\n+ truth = {\n+ \"total_loss\": \"0.1\",\n+ \"iteration\": \"1\",\n+ \"my_text\": \"some_value\",\n+ \"learning_rate\": \"\",\n+ \"num_iterations\": \"\",\n+ \"some_boolean\": \"\",\n+ \"some_string\": \"\",\n+ }\n+ self.assertDictEqual(data, truth)\n", "code_comments": [ { "body": "```suggestion\r\n # Custom trackers are already initialized\r\n```", "diff_hunk": "@@ -631,7 +637,10 @@ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n \"\"\"\n self.trackers = []\n for tracker in self.log_with:\n- if str(tracker).lower() == \"tensorboard\" and is_tensorboard_available():\n+ if issubclass(type(tracker), GeneralTracker):\n+ # Custom tracker that they already initalized", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/305", "pr_id": 896232235 }, { "diff": "diff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex cce7347f6..6a9e66d6a 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -13,8 +13,14 @@\n # limitations under the License.\n \n import asyncio\n+import os\n+import shutil\n import sys\n+import tempfile\n import unittest\n+from pathlib import Path\n+from typing import List, Union\n+from unittest import mock\n \n import torch\n \n@@ -22,6 +28,69 @@\n from ..utils import gather, is_tensorflow_available\n \n \n+class TempDirTestCase(unittest.TestCase):\n+ \"\"\"\n+ A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its\n+ data at the start of a test, and then destroyes it at the end of the TestCase.\n+\n+ Useful for when a class or API requires a single constant folder throughout it's use, such as Weights and Biases\n+\n+ The temporary directory location will be stored in `self.tmpdir`\n+ \"\"\"\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ \"Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`\"\n+ cls.tmpdir = tempfile.mkdtemp()\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ \"Remove `cls.tmpdir` after test suite has finished\"\n+ if os.path.exists(cls.tmpdir):\n+ shutil.rmtree(cls.tmpdir)\n+\n+ def setUp(self):\n+ \"Destroy all contents in `self.tmpdir`, but not `self.tmpdir`\"\n+ for path in Path(self.tmpdir).glob(\"**/*\"):\n+ if path.is_file():\n+ path.unlink()\n+ elif path.is_dir():\n+ shutil.rmtree(path)\n+\n+\n+class MockingTestCase(unittest.TestCase):\n+ \"\"\"\n+ A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the\n+ behavior of a class-wide mock when defining one normally will not do.\n+\n+ Useful when a mock requires specific information available only initialized after `TestCase.setUpClass`, such as\n+ setting an environment variable with that information.\n+\n+ The `add_mocks` function should be ran at the end of a `TestCase`'s `setUp` function, after a call to\n+ `super().setUp()` such as:\n+ ```python\n+ def setUp(self):\n+ 
super().setUp()\n+ mocks = mock.patch.dict(os.environ, {\"SOME_ENV_VAR\", \"SOME_VALUE\"})\n+ self.add_mocks(mocks)\n+ ```\n+ \"\"\"\n+\n+ def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):\n+ \"\"\"\n+ Add custom mocks for tests that should be repeated on each test. Should be called during\n+ `MockingTestCase.setUp`, after `super().setUp()`.\n+\n+ Args:\n+ mocks (`mock.Mock` or list of `mock.Mock`):\n+ Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run\n+ \"\"\"\n+ self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]\n+ for m in self.mocks:\n+ m.start()\n+ self.addCleanup(m.stop)\n+\n+\n def are_the_same_tensors(tensor):\n state = AcceleratorState()\n tensor = tensor[None].clone().to(state.device)\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nindex 8d5ebc6bc..b1f66b1ae 100644\n--- a/tests/test_tracking.py\n+++ b/tests/test_tracking.py\n@@ -15,7 +15,6 @@\n import logging\n import os\n import re\n-import shutil\n import tempfile\n import unittest\n from pathlib import Path\n@@ -23,7 +22,7 @@\n \n # We use TF to parse the logs\n from accelerate import Accelerator\n-from accelerate.test_utils.testing import require_tensorflow\n+from accelerate.test_utils.testing import MockingTestCase, TempDirTestCase, require_tensorflow\n from accelerate.utils import is_tensorflow_available\n \n \n@@ -43,13 +42,11 @@ def test_init_trackers(self):\n hps = None\n project_name = \"test_project_with_config\"\n with tempfile.TemporaryDirectory() as dirpath:\n- oldpwd = os.getcwd()\n- os.chdir(dirpath)\n- accelerator = Accelerator(log_with=\"tensorboard\")\n+ accelerator = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n accelerator.init_trackers(project_name, config)\n accelerator.end_training()\n- for child in Path(project_name).glob(\"*/**\"):\n+ for child in Path(f\"{dirpath}/{project_name}\").glob(\"*/**\"):\n log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]\n # The config log is stored one layer deeper in the logged directory\n # And names are randomly generated each time\n@@ -61,7 +58,6 @@ def test_init_trackers(self):\n plugin_data = plugin_data_pb2.HParamsPluginData.FromString(proto_bytes)\n if plugin_data.HasField(\"session_start_info\"):\n hps = dict(plugin_data.session_start_info.hparams)\n- os.chdir(oldpwd)\n \n self.assertTrue(isinstance(hps, dict))\n keys = list(hps.keys())\n@@ -77,16 +73,14 @@ def test_log(self):\n step = None\n project_name = \"test_project_with_log\"\n with tempfile.TemporaryDirectory() as dirpath:\n- oldpwd = os.getcwd()\n- os.chdir(dirpath)\n- accelerator = Accelerator(log_with=\"tensorboard\")\n+ accelerator = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n accelerator.init_trackers(project_name)\n values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n accelerator.log(values, step=0)\n accelerator.end_training()\n # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord\n # Names are randomly generated each time\n- log = list(filter(lambda x: x.is_file(), Path(project_name).iterdir()))[0]\n+ log = list(filter(lambda x: x.is_file(), Path(f\"{dirpath}/{project_name}\").iterdir()))[0]\n serialized_examples = tf.data.TFRecordDataset(log)\n for e in serialized_examples:\n event = event_pb2.Event.FromString(e.numpy())\n@@ -99,14 +93,18 @@ def test_log(self):\n iteration = value.simple_value\n 
elif value.tag == \"my_text/text_summary\": # Append /text_summary to the key\n my_text = value.tensor.string_val[0].decode()\n- os.chdir(oldpwd)\n self.assertAlmostEqual(total_loss, values[\"total_loss\"])\n self.assertEqual(iteration, values[\"iteration\"])\n self.assertEqual(my_text, values[\"my_text\"])\n \n \n @mock.patch.dict(os.environ, {\"WANDB_MODE\": \"offline\"})\n-class WandBTrackingTest(unittest.TestCase):\n+class WandBTrackingTest(TempDirTestCase, MockingTestCase):\n+ def setUp(self):\n+ super().setUp()\n+ # wandb let's us override where logs are stored to via the WANDB_DIR env var\n+ self.add_mocks(mock.patch.dict(os.environ, {\"WANDB_DIR\": self.tmpdir}))\n+\n @staticmethod\n def get_value_from_log(key: str, log: str, key_occurance: int = 0):\n \"\"\"\n@@ -126,7 +124,7 @@ def test_init_trackers(self):\n accelerator.init_trackers(project_name, config)\n accelerator.end_training()\n # The latest offline log is stored at wandb/latest-run/*.wandb\n- for child in Path(\"wandb/latest-run\").glob(\"*\"):\n+ for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\n logger.info(child)\n if child.is_file() and child.suffix == \".wandb\":\n with open(child, \"rb\") as f:\n@@ -148,7 +146,7 @@ def test_log(self):\n accelerator.log(values, step=0)\n accelerator.end_training()\n # The latest offline log is stored at wandb/latest-run/*.wandb\n- for child in Path(\"wandb/latest-run\").glob(\"*\"):\n+ for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\n if child.is_file() and child.suffix == \".wandb\":\n with open(child, \"rb\") as f:\n content = f.read()\n@@ -159,18 +157,3 @@ def test_log(self):\n self.assertEqual(self.get_value_from_log(\"iteration\", cleaned_log), \"1\")\n self.assertEqual(self.get_value_from_log(\"my_text\", cleaned_log), \"some_value\")\n self.assertEqual(self.get_value_from_log(\"_step\", cleaned_log), \"0\")\n-\n- def setUp(self):\n- os.mkdir(\".wandb_tests\")\n- os.chdir(\".wandb_tests\")\n-\n- def tearDown(self):\n- if os.getcwd().endswith(\".wandb_tests\"):\n- os.chdir(\"..\")\n- if os.path.exists(\".wandb_tests\"):\n- shutil.rmtree(\".wandb_tests\")\n-\n- @classmethod\n- def setUpClass(cls):\n- if os.path.exists(\".wandb_tests\"):\n- shutil.rmtree(\".wandb_tests\")\n", "code_comments": [ { "body": "```suggestion\r\n A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the behavior\r\n```", "diff_hunk": "@@ -13,15 +13,84 @@\n # limitations under the License.\n \n import asyncio\n+import os\n+import shutil\n import sys\n+import tempfile\n import unittest\n+from pathlib import Path\n+from typing import List, Union\n+from unittest import mock\n \n import torch\n \n from ..state import AcceleratorState, is_tpu_available\n from ..utils import gather, is_tensorflow_available\n \n \n+class TempDirTestCase(unittest.TestCase):\n+ \"\"\"\n+ A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its\n+ data at the start of a test, and then destroyes it at the end of the TestCase.\n+\n+ Useful for when a class or API requires a single constant folder throughout it's use, such as Weights and Biases\n+\n+ The temporary directory location will be stored in `self.tmpdir`\n+ \"\"\"\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ \"Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`\"\n+ cls.tmpdir = tempfile.mkdtemp()\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ \"Remove `cls.tmpdir` after test suite has finished\"\n+ if 
os.path.exists(cls.tmpdir):\n+ shutil.rmtree(cls.tmpdir)\n+\n+ def setUp(self):\n+ \"Destroy all contents in `self.tmpdir`, but not `self.tmpdir`\"\n+ for path in Path(self.tmpdir).glob(\"**/*\"):\n+ if path.is_file():\n+ path.unlink()\n+ elif path.is_dir():\n+ shutil.rmtree(path)\n+\n+\n+class MockingTestCase(unittest.TestCase):\n+ \"\"\"\n+ A TestCase class designed to dynamically add various mockers that should be used in every test, mimicing a behavior", "from_author": true } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/304", "pr_id": 896048452 }, { "diff": "diff --git a/docs/source/internal.mdx b/docs/source/internal.mdx\nindex bd6924990..94803a62f 100644\n--- a/docs/source/internal.mdx\n+++ b/docs/source/internal.mdx\n@@ -34,6 +34,10 @@ The main work on your PyTorch `DataLoader` is done by the following function:\n \n [[autodoc]] data_loader.IterableDatasetShard\n \n+## Scheduler\n+\n+[[autodoc]] scheduler.AcceleratedScheduler\n+\n ## Distributed Config\n \n ### AcceleratorState\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex b6f539fd0..2b405e8ec 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -45,11 +45,13 @@ model on `accelerator.device` or your training will fail on TPU.\n \n </Tip>\n \n-3. Pass all objects relevant to training (optimizer, model, training dataloader) to the\n+3. Pass all objects relevant to training (optimizer, model, training dataloader, learning rate scheduler) to the\n [`~Accelerator.prepare`] method. This will make sure everything is ready for training.\n \n ```python\n-model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)\n+model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, lr_scheduler\n+)\n ```\n \n In particular, your training dataloader will be sharded accross all GPUs/TPU cores available so that each one sees a\n@@ -74,14 +76,21 @@ training loop.\n \n <Tip warning={true}>\n \n+You should only pass the learning rate scheduler to [`~Accelerator.prepare`] when the scheduler needs to be stepped\n+at each optimizer step.\n+\n+</Tip>\n+\n+<Tip warning={true}>\n+\n Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its\n length divided by X (since your actual batch size will be multiplied by X), unless you set\n `split_batches=True`.\n \n </Tip>\n \n-Any instruction using your training dataloader length (for instance if you need the number of total training steps\n-to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].\n+Any instruction using your training dataloader length (for instance if you want to log the number of total training\n+steps) should go after the call to [`~Accelerator.prepare`].\n \n You can perfectly send your dataloader to [`~Accelerator.prepare`] on its own, but it's best to send the\n model and optimizer to [`~Accelerator.prepare`] together.\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex bb7c7f5b1..5f2adb7e4 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -139,11 +139,14 @@ def training_function(config, args):\n # Instantiate optimizer\n optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)\n \n+ # Instantiate learning rate scheduler\n+ lr_scheduler = OneCycleLR(optimizer=optimizer, 
max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))\n+\n # Prepare everything\n # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n # prepare method.\n- model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n- model, optimizer, train_dataloader, eval_dataloader\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n \n # Instantiate learning rate scheduler after preparing the training dataloader as the prepare method\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 432d78ae8..8f0d25541 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -113,21 +113,20 @@ def collate_fn(examples):\n # Instantiate optimizer\n optimizer = AdamW(params=model.parameters(), lr=lr, correct_bias=correct_bias)\n \n- # Prepare everything\n- # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n- # prepare method.\n- model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n- model, optimizer, train_dataloader, eval_dataloader\n- )\n-\n- # Instantiate learning rate scheduler after preparing the training dataloader as the prepare method\n- # may change its length.\n+ # Instantiate scheduler\n lr_scheduler = get_linear_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=100,\n num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n )\n \n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n+ )\n+\n # Now we train the model\n for epoch in range(num_epochs):\n model.train()\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex f52cf7ca8..67c981de3 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -27,6 +27,7 @@\n from .data_loader import prepare_data_loader\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler\n from .optimizer import AcceleratedOptimizer\n+from .scheduler import AcceleratedScheduler\n from .state import AcceleratorState, DistributedType, is_deepspeed_available\n from .tracking import CometMLTracker, TensorBoardTracker, WandBTracker, get_available_trackers\n from .utils import (\n@@ -108,6 +109,9 @@ class Accelerator:\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\n and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose\n underlying dataset is an `IterableDataset`, `False` otherwise.\n+ step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`):\n+ Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only\n+ done under certain circumstances (at the end of each epoch, for instance).\n kwargs_handlers (`List[KwargHandler]`, *optional*)\n A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\n are created. 
See [kwargs](kwargs) for more information.\n@@ -130,6 +134,7 @@ def __init__(\n log_with: Optional[List[Union[str, LoggerType]]] = None,\n logging_dir: Optional[Union[str, os.PathLike]] = \"\",\n dispatch_batches: Optional[bool] = None,\n+ step_scheduler_with_optimizer: bool = True,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n if log_with is not None:\n@@ -205,6 +210,7 @@ def __init__(\n raise ImportError(\n \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\n )\n+ self.step_scheduler_with_optimizer = step_scheduler_with_optimizer\n \n # Mixed precision attributes\n self.scaler = None\n@@ -227,6 +233,7 @@ def __init__(\n # Internal references to the training objects\n self._optimizers = []\n self._models = []\n+ self._schedulers = []\n self._custom_objects = []\n \n # RNG Types\n@@ -315,16 +322,22 @@ def print(self, *args, **kwargs):\n if self.is_local_main_process:\n print(*args, **kwargs)\n \n- def _prepare_one(self, obj):\n- if isinstance(obj, torch.utils.data.DataLoader):\n+ def _prepare_one(self, obj, first_pass=False):\n+ # First pass of preparation: DataLoader, model, optimizer\n+ if isinstance(obj, torch.utils.data.DataLoader) and first_pass:\n return self.prepare_data_loader(obj)\n- elif isinstance(obj, torch.nn.Module):\n+ elif isinstance(obj, torch.nn.Module) and first_pass:\n self._models.append(obj)\n return self.prepare_model(obj)\n- elif isinstance(obj, torch.optim.Optimizer):\n+ elif isinstance(obj, torch.optim.Optimizer) and first_pass:\n optimizer = self.prepare_optimizer(obj)\n self._optimizers.append(optimizer)\n return optimizer\n+ # Second pass of preparation: LR scheduler (which need the full list of optimizers)\n+ elif isinstance(obj, torch.optim.lr_scheduler._LRScheduler) and not first_pass:\n+ scheduler = self.prepare_scheduler(obj)\n+ self._schedulers.append(scheduler)\n+ return scheduler\n else:\n return obj\n \n@@ -362,7 +375,8 @@ def prepare(self, *args):\n if self.distributed_type == DistributedType.DEEPSPEED:\n result = self._prepare_deepspeed(*args)\n else:\n- result = tuple(self._prepare_one(obj) for obj in args)\n+ result = tuple(self._prepare_one(obj, first_pass=True) for obj in args)\n+ result = tuple(self._prepare_one(obj) for obj in result)\n \n if tpu_should_fix_optimizer:\n # 2. 
grabbing new model parameters\n@@ -492,6 +506,21 @@ def prepare_data_loader(self, data_loader):\n def prepare_optimizer(self, optimizer):\n return AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\n \n+ def prepare_scheduler(self, scheduler):\n+ # We try to find the optimizer associated with `scheduler`, the default is the full list.\n+ optimizer = self._optimizers\n+ for opt in self._optimizers:\n+ if getattr(scheduler, \"optimizer\", None) == opt.optimizer:\n+ optimizer = opt\n+ break\n+\n+ return AcceleratedScheduler(\n+ scheduler,\n+ optimizer,\n+ step_with_optimizer=self.step_scheduler_with_optimizer,\n+ split_batches=self.split_batches,\n+ )\n+\n def backward(self, loss, **kwargs):\n \"\"\"\n Use `accelerator.backward(loss)` in lieu of `loss.backward()`.\n@@ -659,7 +688,7 @@ def save_state(self, output_dir: str):\n logger.info(f\"Saving current state to {output_dir}\")\n weights = [self.get_state_dict(m) for m in self._models]\n save_location = save_accelerator_state(\n- output_dir, weights, self._optimizers, self.state.process_index, self.scaler\n+ output_dir, weights, self._optimizers, self._schedulers, self.state.process_index, self.scaler\n )\n for i, obj in enumerate(self._custom_objects):\n save_custom_state(obj, output_dir, i)\n@@ -678,7 +707,9 @@ def load_state(self, input_dir: str):\n if not os.path.isdir(input_dir):\n raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n logger.info(f\"Loading states from {input_dir}\")\n- load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)\n+ load_accelerator_state(\n+ input_dir, self._models, self._optimizers, self._schedulers, self.state.process_index, self.scaler\n+ )\n custom_checkpoints = [f for f in os.listdir(input_dir) if \"custom_checkpoint\" in f]\n if len(custom_checkpoints) != len(self._custom_objects):\n err = \"Warning! Number of found checkpoints does not match the number of registered objects:\"\n@@ -695,6 +726,7 @@ def free_memory(self):\n Will release all references to the internal objects stored and call the garbage collector. 
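To make the new `prepare` flow above concrete, here is a minimal usage sketch (the toy model, optimizer and `LambdaLR` mirror the new test rather than any real training script):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator(step_scheduler_with_optimizer=True)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)

# The scheduler is passed to `prepare` with everything else; `prepare_scheduler` pairs it
# with its wrapped optimizer and returns an `AcceleratedScheduler`.
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
```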
You should call this\n method between two trainings with different models/optimizers.\n \"\"\"\n+ self._schedulers = []\n self._optimizers = []\n self._models = []\n self.deepspeed_engine = None\n@@ -795,6 +827,6 @@ def optimizer_step_was_skipped(self):\n case the learning rate should not be changed.\n \"\"\"\n for optimizer in self._optimizers:\n- if optimizer.is_overflow:\n+ if optimizer.step_was_skipped:\n return True\n return False\ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex 99f18be43..b35bec237 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -22,7 +22,7 @@\n from torch.cuda.amp import GradScaler\n \n from .state import is_tpu_available\n-from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, get_pretty_name, save\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME, get_pretty_name, save\n \n \n if is_tpu_available():\n@@ -35,7 +35,12 @@\n \n \n def save_accelerator_state(\n- output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+ output_dir: str,\n+ model_states: List[dict],\n+ optimizers: list,\n+ schedulers: list,\n+ process_index: int,\n+ scaler: GradScaler = None,\n ):\n \"\"\"\n Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.\n@@ -47,6 +52,8 @@ def save_accelerator_state(\n A list of model states\n optimizers (`List[torch.optim.Optimizer]`):\n A list of optimizer instances\n+ schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):\n+ A list of learning rate schedulers\n process_index (`int`):\n The current process index in the Accelerator state\n scaler (`torch.cuda.amp.GradScaler`, *optional*):\n@@ -65,6 +72,13 @@ def save_accelerator_state(\n output_optimizer_file = os.path.join(output_dir, optimizer_name)\n save(state, output_optimizer_file)\n logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # Scheduler states\n+ for i, scheduler in enumerate(schedulers):\n+ state = scheduler.state_dict()\n+ scheduler_name = f\"{SCHEDULER_NAME}.bin\" if i == 0 else f\"{SCHEDULER_NAME}_{i}.bin\"\n+ output_scheduler_file = os.path.join(output_dir, scheduler_name)\n+ save(state, output_scheduler_file)\n+ logger.info(f\"Scheduler state saved in {output_scheduler_file}\")\n # GradScaler state\n if scaler is not None:\n state = scaler.state_dict()\n@@ -87,7 +101,7 @@ def save_accelerator_state(\n return output_dir\n \n \n-def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=None):\n+def load_accelerator_state(input_dir, models, optimizers, schedulers, process_index, scaler=None):\n \"\"\"\n Loads states of the models, optimizers, scaler, and RNG generators from a given directory.\n \n@@ -98,6 +112,8 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=\n A list of model instances\n optimizers (`List[torch.optim.Optimizer]`):\n A list of optimizer instances\n+ schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):\n+ A list of learning rate schedulers\n process_index (`int`):\n The current process index in the Accelerator state\n scaler (`torch.cuda.amp.GradScaler`, *optional*):\n@@ -117,6 +133,13 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=\n optimizers[i].load_state_dict(torch.load(input_optimizer_file, map_location=\"cpu\"))\n logger.info(\"All optimizer states loaded successfully\")\n \n+ # Scheduler states\n+ for i, 
scheduler in enumerate(schedulers):\n+ scheduler_name = f\"{SCHEDULER_NAME}.bin\" if i == 0 else f\"{SCHEDULER_NAME}_{i}.bin\"\n+ input_scheduler_file = os.path.join(input_dir, scheduler_name)\n+ scheduler.load_state_dict(torch.load(input_scheduler_file))\n+ logger.info(\"All scheduler states loaded successfully\")\n+\n # GradScaler state\n if scaler is not None:\n input_scaler_file = os.path.join(input_dir, SCALER_NAME)\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 4360c0c09..1bf37b7d5 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import inspect\n+import warnings\n \n import torch\n \n@@ -141,4 +142,14 @@ def _switch_parameters(self, parameters_map):\n @property\n def is_overflow(self):\n \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n+ warnings.warn(\n+ \"The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use \"\n+ \"`optimizer.step_was_skipped` instead.\",\n+ FutureWarning,\n+ )\n+ return self._is_overflow\n+\n+ @property\n+ def step_was_skipped(self):\n+ \"\"\"Whether or not the optimizer step was skipped.\"\"\"\n return self._is_overflow\ndiff --git a/src/accelerate/scheduler.py b/src/accelerate/scheduler.py\nnew file mode 100644\nindex 000000000..8efce8dea\n--- /dev/null\n+++ b/src/accelerate/scheduler.py\n@@ -0,0 +1,80 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from .state import AcceleratorState\n+\n+\n+class AcceleratedScheduler:\n+ \"\"\"\n+ A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. 
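Since scheduler states are now part of the saved state, a brief round-trip sketch (the checkpoint directory name is illustrative):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

accelerator.save_state("checkpoint_dir")  # writes pytorch_model.bin, optimizer.bin and now scheduler.bin (plus RNG states)
accelerator.load_state("checkpoint_dir")  # restores them, schedulers included
```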
Useful\n+ to avoid making a scheduler step too fast when:\n+\n+ - gradients went overflow and there was no training step (in mixed precision training)\n+ - step was skipped because of gradient accumulation\n+\n+ Args:\n+ scheduler (`torch.optim.lr_scheduler._LRScheduler`):\n+ The scheduler to wrap.\n+ optimizers (one or a list of `torch.optim.Optimizer`):\n+ The optimizers used.\n+ step_with_optimizer (`bool`, *optional*, defaults to `True`):\n+ Whether or not the scheduler should be stepped at each optimizer step.\n+ split_batches (`bool`, *optional*, defaults to `False`):\n+ Whether or not the dataloaders split one batch across the different processes (so batch size is the same\n+ regardless of the number of processes) or create batches on each process (so batch size is the original\n+ batch size multiplied by the number of processes).\n+ \"\"\"\n+\n+ def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):\n+ self.scheduler = scheduler\n+ self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]\n+ self.split_batches = split_batches\n+ self.step_with_optimizer = step_with_optimizer\n+\n+ def step(self, *args, **kwargs):\n+ if not self.step_with_optimizer:\n+ # No link between scheduler and optimizer -> just step\n+ self.scheduler.step(*args, **kwargs)\n+ return\n+\n+ # Otherwise, first make sure the optimizer was stepped.\n+ for opt in self.optimizers:\n+ if opt.step_was_skipped:\n+ return\n+\n+ if self.split_batches:\n+ # Split batches -> the training dataloader batch size is not changed so one step per training step\n+ self.scheduler.step(*args, **kwargs)\n+ else:\n+ # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do\n+ # num_processes steps per training step\n+ num_processes = AcceleratorState().num_processes\n+ for _ in range(num_processes):\n+ self.scheduler.step(*args, **kwargs)\n+\n+ # Passthroughs\n+ def get_last_lr(self):\n+ return self.scheduler.get_last_lr()\n+\n+ def state_dict(self):\n+ return self.scheduler.state_dict()\n+\n+ def load_state_dict(self, state_dict):\n+ self.scheduler.load_state_dict(state_dict)\n+\n+ def get_lr(self):\n+ return self.scheduler.get_lr()\n+\n+ def print_lr(self, *args, **kwargs):\n+ return self.scheduler.print_lr(*args, **kwargs)\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex c5a291ab8..9bcd87f5f 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -65,6 +65,7 @@ def is_sagemaker_available():\n MODEL_NAME = \"pytorch_model\"\n RNG_STATE_NAME = \"random_states\"\n OPTIMIZER_NAME = \"optimizer\"\n+SCHEDULER_NAME = \"scheduler\"\n \n \n class EnumWithContains(EnumMeta):\ndiff --git a/tests/test_scheduler.py b/tests/test_scheduler.py\nnew file mode 100644\nindex 000000000..8ae9e56ae\n--- /dev/null\n+++ b/tests/test_scheduler.py\n@@ -0,0 +1,62 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+from functools import partial\n+\n+import torch\n+\n+from accelerate import Accelerator, debug_launcher\n+\n+\n+def scheduler_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\n+ accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)\n+ model = torch.nn.Linear(2, 4)\n+ optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\n+ scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)\n+\n+ model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)\n+\n+ # Optimizer has stepped\n+ optimizer._is_overflow = False\n+ scheduler.step()\n+ expected_lr = 1 - (num_processes if (step_scheduler_with_optimizer and not split_batches) else 1) / 10\n+ assert (\n+ scheduler.get_last_lr()[0] == expected_lr\n+ ), f\"Wrong lr found at first step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}\"\n+\n+ # Optimizer has not stepped\n+ optimizer._is_overflow = True\n+ scheduler.step()\n+ if not step_scheduler_with_optimizer:\n+ expected_lr = 1 - 2 / 10\n+ assert (\n+ scheduler.get_last_lr()[0] == expected_lr\n+ ), f\"Wrong lr found at second step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}\"\n+\n+\n+class SchedulerTester(unittest.TestCase):\n+ def test_scheduler_steps_with_optimizer_single_process(self):\n+ debug_launcher(partial(scheduler_test, num_processes=1), num_processes=1)\n+ debug_launcher(partial(scheduler_test, num_processes=1, split_batches=True), num_processes=1)\n+\n+ def test_scheduler_not_step_with_optimizer_single_process(self):\n+ debug_launcher(partial(scheduler_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)\n+\n+ def test_scheduler_steps_with_optimizer_multiprocess(self):\n+ debug_launcher(scheduler_test)\n+ debug_launcher(partial(scheduler_test, num_processes=1, split_batches=True), num_processes=1)\n+\n+ def test_scheduler_not_step_with_optimizer_multiprocess(self):\n+ debug_launcher(partial(scheduler_test, step_scheduler_with_optimizer=False))\n", "code_comments": [ { "body": "```suggestion\r\n Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only\r\n done under certain circumstances (at the end of each epoch, for instance).\r\n```", "diff_hunk": "@@ -92,6 +93,9 @@ class Accelerator:\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\n and then the batches are split and broadcast to each process. 
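To see the stepping rule exercised by the tests above at work, here is a standalone illustration of what the wrapper effectively does on two processes with `split_batches=False` (plain PyTorch, emulating rather than launching a distributed run):

```python
import torch

num_processes = 2  # assumed for the illustration
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)

optimizer.step()  # the wrapper only steps the scheduler when the optimizer actually stepped
for _ in range(num_processes):
    scheduler.step()

print(scheduler.get_last_lr()[0])  # 0.8, i.e. 1 - num_processes / 10, matching the expected_lr in the test
```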
Will default to `True` for `DataLoader` whose\n underlying dataset is an `IterableDataset`, `False` otherwise.\n+ step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`):\n+ Set `True` if you step your learning rate scheduler at the same time as the optimizer, `False` if you only\n+ do it under certain circumstances (at the end of each epoch, for instance).", "from_author": false }, { "body": "Clever! I had wondered how you'd do this πŸ˜„ ", "diff_hunk": "@@ -458,6 +472,21 @@ def prepare_data_loader(self, data_loader):\n def prepare_optimizer(self, optimizer):\n return AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\n \n+ def prepare_scheduler(self, scheduler):\n+ # We try to find the optimizer associated with `scheduler`, the default is the full list.\n+ optimizer = self._optimizers\n+ for opt in self._optimizers:\n+ if getattr(scheduler, \"optimizer\", None) == opt.optimizer:\n+ optimizer = opt\n+ break", "from_author": false }, { "body": "```suggestion\r\n logger.info(f\"Scheduler state saved in {output_scheduler_file}\")\r\n```", "diff_hunk": "@@ -65,6 +72,13 @@ def save_accelerator_state(\n output_optimizer_file = os.path.join(output_dir, optimizer_name)\n save(state, output_optimizer_file)\n logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # Scheduler states\n+ for i, scheduler in enumerate(schedulers):\n+ state = scheduler.state_dict()\n+ scheduler_name = f\"{SCHEDULER_NAME}.bin\" if i == 0 else f\"{SCHEDULER_NAME}_{i}.bin\"\n+ output_scheduler_file = os.path.join(output_dir, scheduler_name)\n+ save(state, output_scheduler_file)\n+ logger.info(f\"Optimizer state saved in {output_scheduler_file}\")", "from_author": false }, { "body": "```suggestion\r\n scheduler (`torch.optim.lr_scheduler._LRScheduler`): \r\n The scheduler to wrap.\r\n optimizers (one or a list of `torch.optim.Optimizer`): \r\n The optimizers used.\r\n```", "diff_hunk": "@@ -0,0 +1,78 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from .state import AcceleratorState\n+\n+\n+class AcceleratedScheduler:\n+ \"\"\"\n+ A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. 
Useful\n+ to avoid making a scheduler step too fast when:\n+\n+ - gradients went overflow and there was no training step (in mixed precision training)\n+ - step was skipped because of gradient accumulation\n+\n+ Args:\n+ scheduler (`torch.optim.lr_scheduler._LRScheduler`): The scheduler to wrap.\n+ optimizers (one or a list of `torch.optim.Optimizer`): The optimizers used.", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/301", "pr_id": 894856300 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex dd946c85d..0cbd66884 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -56,7 +56,6 @@ def launch_command_parser(subparsers=None):\n )\n parser.add_argument(\n \"--mixed_precision\",\n- default=\"no\",\n type=str,\n choices=[\"no\", \"fp16\", \"bf16\"],\n help=\"Whether or not to use mixed precision training. \"\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/300", "pr_id": 894724597 }, { "diff": "diff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex 0636dffaa..99f18be43 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -107,14 +107,14 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=\n for i, model in enumerate(models):\n weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n input_model_file = os.path.join(input_dir, weights_name)\n- models[i].load_state_dict(torch.load(input_model_file))\n+ models[i].load_state_dict(torch.load(input_model_file, map_location=\"cpu\"))\n logger.info(\"All model weights loaded successfully\")\n \n # Optimizer states\n for i, opt in enumerate(optimizers):\n optimizer_name = f\"{OPTIMIZER_NAME}.bin\" if i == 0 else f\"{OPTIMIZER_NAME}_{i}.bin\"\n input_optimizer_file = os.path.join(input_dir, optimizer_name)\n- optimizers[i].load_state_dict(torch.load(input_optimizer_file))\n+ optimizers[i].load_state_dict(torch.load(input_optimizer_file, map_location=\"cpu\"))\n logger.info(\"All optimizer states loaded successfully\")\n \n # GradScaler state\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "LG2M! Nice catch! 
Looks like we might be able to link https://github.com/huggingface/accelerate/issues/296?", "from_author": false }, { "body": "That's no linked per se since in this issue, the user was using `optimizer.load_state_dict` and not `accelerator.load_state`.", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/299", "pr_id": 893350079 }, { "diff": "diff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 2f869bd0f..432d78ae8 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -78,7 +78,7 @@ def tokenize_function(examples):\n \n # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n # transformers library\n- tokenized_datasets.rename_column_(\"label\", \"labels\")\n+ tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")\n \n # If the batch size is too big we use gradient accumulation\n gradient_accumulation_steps = 1\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/298", "pr_id": 893343225 }, { "diff": "diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nindex aa04063d8..c213a9167 100644\n--- a/.github/workflows/test.yml\n+++ b/.github/workflows/test.yml\n@@ -12,6 +12,6 @@ jobs:\n with:\n python-version: 3.6\n - name: Install Python dependencies\n- run: pip install -e .[test]\n+ run: pip install setuptools==59.5.0; pip install -e .[test,test_trackers]\n - name: Run Tests\n run: make test\n\\ No newline at end of file\ndiff --git a/Makefile b/Makefile\nindex 943191f62..e5b557f10 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -25,4 +25,4 @@ style:\n \t\n # Run tests for the library\n test:\n-\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/\n+\tpython -m pytest -n auto --dist=loadfile -s -v ./tests/\n\\ No newline at end of file\ndiff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 8f266c600..0a6d45892 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -21,4 +21,6 @@\n title: Internals\n - local: checkpoint\n title: Checkpointing\n+ - local: tracking\n+ title: Experiment Tracking\n title: API Reference\ndiff --git a/docs/source/internal.mdx b/docs/source/internal.mdx\nindex 3acaaad87..bd6924990 100644\n--- a/docs/source/internal.mdx\n+++ b/docs/source/internal.mdx\n@@ -44,6 +44,10 @@ The main work on your PyTorch `DataLoader` is done by the following function:\n \n [[autodoc]] state.DistributedType\n \n+## Tracking\n+\n+[[autodoc]] tracking.GeneralTracker\n+\n ## Utilities\n \n [[autodoc]] utils.extract_model_from_parallel\ndiff --git a/docs/source/tracking.mdx b/docs/source/tracking.mdx\nnew file mode 100644\nindex 000000000..256e2848b\n--- /dev/null\n+++ b/docs/source/tracking.mdx\n@@ -0,0 +1,148 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Tracking\n+\n+There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex.\n+Accelerate provides a general tracking API that can be used to log useful items during your script through [`~Accelerator.log`]\n+\n+## Integrated Trackers\n+\n+Currently `Accelerate` supports three trackers out-of-the-box:\n+\n+\n+[[autodoc]] tracking.TensorBoardTracker\n+\n+[[autodoc]] tracking.WandBTracker\n+\n+[[autodoc]] tracking.CometMLTracker\n+\n+To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerate`]:\n+```python\n+from accelerate import Accelerate\n+from accelerate.utils import LoggerType\n+\n+accelerator = Accelerate(log_with=\"all\") # For all available trackers in the environment\n+accelerator = Accelerate(log_with=\"wandb\")\n+accelerator = Accelerate(log_with=[\"wandb\", LoggerType.TENSORBOARD])\n+```\n+\n+At the start of your experiment [`~Accelerator.init_trackers`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged:\n+```python\n+hps = {\"num_iterations\": 5, \"learning_rate\": 1e-2}\n+accelerator.init_trackers(\"my_project\", config=hps)\n+```\n+\n+When you are ready to log any data, [`~Accelerator.log`] should be used.\n+A `step` can also be passed in to correlate the data with a particular step in the training loop.\n+```python\n+accelerator.log({\"train_loss\": 1.12, \"valid_loss\": 0.8}, step=1)\n+```\n+\n+Once you've finished training, make sure to run [`~Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any.\n+```python\n+accelerator.end_training()\n+```\n+\n+\n+A full example is below:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator(log_with=\"all\")\n+config = {\n+ \"num_iterations\": 5,\n+ \"learning_rate\": 1e-2,\n+ \"loss_function\": str(my_loss_function),\n+}\n+\n+accelerator.init_trackers(\"example_project\", config=config)\n+\n+my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n+device = accelerator.device\n+my_model.to(device)\n+\n+for iteration in config[\"num_iterations\"]:\n+ for step, batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ my_optimizer.step()\n+ accelerator.log({\"training_loss\": loss}, step=step)\n+accelerator.end_training()\n+```\n+\n+\n+## Implementing Custom Trackers\n+\n+To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`~GeneralTracker`] class.\n+Every tracker must implement three functions:\n+ - `__init__`: \n+ - Should store a `run_name` and initialize the tracker API of the integrated library. 
\n+ - If a tracker stores their data locally (such as TensorBoard), a `logging_dir` parameter can be added.\n+ - `store_init_configuration`: \n+ - Should take in a `values` dictionary and store them as a one-time experiment configuration\n+ - `log`: \n+ - Should take in a `values` dictionary and a `step`, and should log them to the run\n+\n+A brief example can be seen below with an integration with Weights and Biases, containing only the relevent information:\n+```python\n+from accelerate.tracking import GeneralTracker\n+from typing import Optional\n+\n+import wandb\n+\n+\n+class WandBTracker(GeneralTracker):\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ wandb.init(self.run_name)\n+\n+ def store_init_configuration(self, values: dict):\n+ wandb.config(values)\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ wandb.log(values, step=step)\n+```\n+\n+## When a wrapper cannot work\n+\n+If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:\n+```diff\n+from accelerate import Accelerator\n++ import neptune.new as neptune\n+\n+accelerator = Accelerator()\n++ run = neptune.init(...)\n+\n+my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n+device = accelerator.device\n+my_model.to(device)\n+\n+for iteration in config[\"num_iterations\"]:\n+ for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ total_loss += loss\n+ accelerator.backward(loss)\n+ my_optimizer.step()\n++ if accelerator.is_main_process:\n++ run[\"logs/training/batch/loss\"].log(loss)\n+```\ndiff --git a/setup.py b/setup.py\nindex c898eb5a1..0ce1e5af4 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -20,8 +20,9 @@\n extras[\"docs\"] = []\n extras[\"test\"] = [\n \"pytest\",\n- \"pytest-xdist\",\n+ \"pytest-xdist\"\n ]\n+extras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorflow\"]\n extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n \n extras[\"sagemaker\"] = [\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 2b623db34..f52cf7ca8 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -28,14 +28,19 @@\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler\n from .optimizer import AcceleratedOptimizer\n from .state import AcceleratorState, DistributedType, is_deepspeed_available\n+from .tracking import CometMLTracker, TensorBoardTracker, WandBTracker, get_available_trackers\n from .utils import (\n DeepSpeedPlugin,\n+ LoggerType,\n PrecisionType,\n RNGType,\n convert_outputs_to_fp32,\n extract_model_from_parallel,\n gather,\n get_pretty_name,\n+ is_comet_ml_available,\n+ is_tensorboard_available,\n+ is_wandb_available,\n pad_across_processes,\n save,\n wait_for_everyone,\n@@ -88,6 +93,17 @@ class Accelerator:\n dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n \n Will default to `[\"torch\"]` for PyTorch versions <=1.5.1 and `[\"generator\"]` for PyTorch versions >= 1.6.\n+ log_with (list of `str` or [`~utils.LoggerType`], *optional*):\n+ A list of loggers to be setup for experiment tracking. 
Should be one or several of:\n+\n+ - `\"all\"`\n+ - `\"tensorboard\"`\n+ - `\"wandb\"`\n+ - `\"comet_ml\"`\n+\n+ If `\"all`\" is selected, will pick up all available trackers in the environment and intialize them.\n+ logging_dir (`str`, `os.PathLike`, *optional*):\n+ A path to a directory for storing logs of locally-compatible loggers.\n dispatch_batches (`bool`, *optional*):\n If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\n and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose\n@@ -111,9 +127,27 @@ def __init__(\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n+ log_with: Optional[List[Union[str, LoggerType]]] = None,\n+ logging_dir: Optional[Union[str, os.PathLike]] = \"\",\n dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+ if log_with is not None:\n+ if not isinstance(log_with, list):\n+ log_with = [log_with]\n+ if \"all\" in log_with or LoggerType.ALL in log_with:\n+ log_with = get_available_trackers()\n+ else:\n+ for i, log_type in enumerate(log_with):\n+ if log_type not in LoggerType:\n+ raise ValueError(\n+ f\"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}\"\n+ )\n+ log_with[i] = LoggerType(log_type)\n+ log_with = list(set(log_with))\n+ self.log_with = log_with\n+ self.logging_dir = logging_dir\n+\n if mixed_precision is not None:\n mixed_precision = str(mixed_precision)\n if mixed_precision not in PrecisionType:\n@@ -556,6 +590,50 @@ def wait_for_everyone(self):\n \"\"\"\n wait_for_everyone()\n \n+ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n+ \"\"\"\n+ Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations\n+\n+ Args:\n+ project_name (`str`):\n+ The name of the project. All trackers will save their data based on this\n+ config (`dict`, *optional*):\n+ Optional starting configuration to be logged.\n+ \"\"\"\n+ self.trackers = []\n+ for tracker in self.log_with:\n+ if str(tracker).lower() == \"tensorboard\" and is_tensorboard_available():\n+ self.trackers.append(TensorBoardTracker(project_name, self.logging_dir))\n+ elif str(tracker).lower() == \"wandb\" and is_wandb_available():\n+ self.trackers.append(WandBTracker(project_name))\n+ elif str(tracker).lower() == \"comet_ml\" and is_comet_ml_available():\n+ self.trackers.append(CometMLTracker(project_name))\n+ if config is not None:\n+ for tracker in self.trackers:\n+ tracker.store_init_configuration(config)\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to all stored trackers in `self.trackers`.\n+\n+ Args:\n+ values (`dict`):\n+ Values should be a dictionary-like object containing only types `int`, `float`, or `str`.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ if self.is_main_process:\n+ for tracker in self.trackers:\n+ tracker.log(values, step=step)\n+\n+ def end_training(self):\n+ \"\"\"\n+ Runs any special end training behaviors, such as stopping trackers\n+ \"\"\"\n+ if self.is_main_process:\n+ for tracker in self.trackers:\n+ tracker.finish()\n+\n def save(self, obj, f):\n \"\"\"\n Save the object passed to disk once per machine. 
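A condensed sketch of the logging flow these methods add, assuming TensorBoard is available in the environment:

```python
from accelerate import Accelerator

accelerator = Accelerator(log_with="tensorboard")
accelerator.init_trackers("example_project", config={"learning_rate": 1e-2})

# `log` and `end_training` are safe to call from every process: they only act on the main one.
accelerator.log({"train_loss": 0.5}, step=0)
accelerator.end_training()
```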
Use in place of `torch.save`.\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex 507f16eb8..cce7347f6 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -19,7 +19,7 @@\n import torch\n \n from ..state import AcceleratorState, is_tpu_available\n-from ..utils import gather\n+from ..utils import gather, is_tensorflow_available\n \n \n def are_the_same_tensors(tensor):\n@@ -64,6 +64,17 @@ def require_multi_gpu(test_case):\n return test_case\n \n \n+def require_tensorflow(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires TensorFlow installed. These tests are skipped when TensorFlow isn't\n+ installed\n+ \"\"\"\n+ if not is_tensorflow_available():\n+ return unittest.skip(\"test requires TensorFlow\")(test_case)\n+ else:\n+ return test_case\n+\n+\n class _RunOutput:\n def __init__(self, returncode, stdout, stderr):\n self.returncode = returncode\ndiff --git a/src/accelerate/tracking.py b/src/accelerate/tracking.py\nnew file mode 100644\nindex 000000000..65a5caec2\n--- /dev/null\n+++ b/src/accelerate/tracking.py\n@@ -0,0 +1,245 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+import os\n+from abc import ABCMeta, abstractmethod\n+from typing import Optional, Union\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration\n+ functionality of a tracking API.\n+\n+ Args:\n+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n+ `str`, `float`, `int`, or `None`.\n+ \"\"\"\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values: dict, step: Optional[int]):\n+ \"\"\"\n+ Logs `values` to the current run. 
Base `log` implementations of a tracking API should go in here, along with\n+ special behavior for the `step parameter.\n+\n+ Args:\n+ values (Dictionary `str` to `str`, `float`, or `int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ pass\n+\n+ def finish(self):\n+ \"\"\"\n+ Should run any finalizing functions within the tracking API. If the API should not have one, just don't\n+ overwrite that method.\n+ \"\"\"\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run\n+ logging_dir (`str`, `os.PathLike`):\n+ Location for TensorBoard logs to be stored.\n+ \"\"\"\n+\n+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \"\"):\n+ self.run_name = run_name\n+ self.logging_dir = os.path.join(logging_dir, run_name)\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n+ `str`, `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (Dictionary `str` to `str`, `float`, or `int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ self.writer.add_scalar(k, v, global_step=step)\n+ elif isinstance(v, str):\n+ self.writer.add_text(k, v, global_step=step)\n+ self.writer.flush()\n+ logger.info(\"Successfully logged to TensorBoard\")\n+\n+ def finish(self):\n+ \"\"\"\n+ Closes `TensorBoard` writer\n+ \"\"\"\n+ self.writer.close()\n+ logger.info(\"TensorBoard writer closed\")\n+\n+\n+class WandBTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ \"\"\"\n+\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ self.run = wandb.init(self.run_name)\n+ logger.info(f\"Initialized WandB project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. 
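The `TensorBoardTracker` above can also be driven directly; a small sketch with illustrative names (normally the `Accelerator` instantiates it for you):

```python
from accelerate.tracking import TensorBoardTracker

tracker = TensorBoardTracker("demo_run", logging_dir="logs")
tracker.store_init_configuration({"learning_rate": 1e-2, "num_iterations": 5})
tracker.log({"train_loss": 0.42, "note": "first step"}, step=0)  # numbers become scalars, strings become text
tracker.finish()
```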
Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n+ `str`, `float`, `int`, or `None`.\n+ \"\"\"\n+ wandb.config.update(values)\n+ logger.info(\"Stored initial configuration hyperparameters to WandB\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (Dictionary `str` to `str`, `float`, or `int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ self.run.log(values, step=step)\n+ logger.info(\"Successfully logged to WandB\")\n+\n+ def finish(self):\n+ \"\"\"\n+ Closes `wandb` writer\n+ \"\"\"\n+ self.run.finish()\n+ logger.info(\"WandB run closed\")\n+\n+\n+class CometMLTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script.\n+\n+ API keys must be stored in a Comet config file.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ \"\"\"\n+\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ self.writer = Experiment(project_name=run_name)\n+ logger.info(f\"Initialized CometML project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n+ `str`, `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.log_parameters(values)\n+ logger.info(\"Stored initial configuration hyperparameters to CometML\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (Dictionary `str` to `str`, `float`, or `int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ step (`int`, *optional*):\n+ The run step. 
If included, the log will be affiliated with this step.\n+ \"\"\"\n+ if step is not None:\n+ self.writer.set_step(step)\n+ self.writer.log_others(values)\n+ logger.info(\"Successfully logged to CometML\")\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex af9299a4f..c5a291ab8 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -34,6 +34,22 @@\n import torch_xla.core.xla_model as xm\n \n \n+def is_tensorflow_available():\n+ return importlib.util.find_spec(\"tensorflow\") is not None\n+\n+\n+def is_tensorboard_available():\n+ return importlib.util.find_spec(\"tensorboard\") is not None or importlib.util.find_spec(\"tensorboardX\") is not None\n+\n+\n+def is_wandb_available():\n+ return importlib.util.find_spec(\"wandb\") is not None\n+\n+\n+def is_comet_ml_available():\n+ return importlib.util.find_spec(\"comet_ml\") is not None\n+\n+\n def is_boto3_available():\n return importlib.util.find_spec(\"boto3\") is not None\n \n@@ -74,6 +90,13 @@ def list(cls):\n return list(map(lambda item: str(item), cls))\n \n \n+class LoggerType(BaseEnum):\n+ ALL = \"all\"\n+ TENSORBOARD = \"tensorboard\"\n+ WANDB = \"wandb\"\n+ COMETML = \"comet_ml\"\n+\n+\n class PrecisionType(BaseEnum):\n NO = \"no\"\n FP16 = \"fp16\"\ndiff --git a/tests/test_tracking.py b/tests/test_tracking.py\nnew file mode 100644\nindex 000000000..8d5ebc6bc\n--- /dev/null\n+++ b/tests/test_tracking.py\n@@ -0,0 +1,176 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import logging\n+import os\n+import re\n+import shutil\n+import tempfile\n+import unittest\n+from pathlib import Path\n+from unittest import mock\n+\n+# We use TF to parse the logs\n+from accelerate import Accelerator\n+from accelerate.test_utils.testing import require_tensorflow\n+from accelerate.utils import is_tensorflow_available\n+\n+\n+if is_tensorflow_available():\n+ import tensorflow as tf\n+ from tensorboard.plugins.hparams import plugin_data_pb2\n+ from tensorflow.core.util import event_pb2\n+ from tensorflow.python.summary.summary_iterator import summary_iterator\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class TensorBoardTrackingTest(unittest.TestCase):\n+ @require_tensorflow\n+ def test_init_trackers(self):\n+ hps = None\n+ project_name = \"test_project_with_config\"\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ oldpwd = os.getcwd()\n+ os.chdir(dirpath)\n+ accelerator = Accelerator(log_with=\"tensorboard\")\n+ config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n+ accelerator.init_trackers(project_name, config)\n+ accelerator.end_training()\n+ for child in Path(project_name).glob(\"*/**\"):\n+ log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]\n+ # The config log is stored one layer deeper in the logged directory\n+ # And names are randomly generated each time\n+ si = summary_iterator(str(log))\n+ # Pull HPS through careful parsing\n+ for event in 
si:\n+ for value in event.summary.value:\n+ proto_bytes = value.metadata.plugin_data.content\n+ plugin_data = plugin_data_pb2.HParamsPluginData.FromString(proto_bytes)\n+ if plugin_data.HasField(\"session_start_info\"):\n+ hps = dict(plugin_data.session_start_info.hparams)\n+ os.chdir(oldpwd)\n+\n+ self.assertTrue(isinstance(hps, dict))\n+ keys = list(hps.keys())\n+ keys.sort()\n+ self.assertEqual(keys, [\"learning_rate\", \"num_iterations\", \"some_boolean\", \"some_string\"])\n+ self.assertEqual(hps[\"num_iterations\"].number_value, 12)\n+ self.assertEqual(hps[\"learning_rate\"].number_value, 0.01)\n+ self.assertEqual(hps[\"some_boolean\"].bool_value, False)\n+ self.assertEqual(hps[\"some_string\"].string_value, \"some_value\")\n+\n+ @require_tensorflow\n+ def test_log(self):\n+ step = None\n+ project_name = \"test_project_with_log\"\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ oldpwd = os.getcwd()\n+ os.chdir(dirpath)\n+ accelerator = Accelerator(log_with=\"tensorboard\")\n+ accelerator.init_trackers(project_name)\n+ values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n+ accelerator.log(values, step=0)\n+ accelerator.end_training()\n+ # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord\n+ # Names are randomly generated each time\n+ log = list(filter(lambda x: x.is_file(), Path(project_name).iterdir()))[0]\n+ serialized_examples = tf.data.TFRecordDataset(log)\n+ for e in serialized_examples:\n+ event = event_pb2.Event.FromString(e.numpy())\n+ if step is None:\n+ step = event.step\n+ for value in event.summary.value:\n+ if value.tag == \"total_loss\":\n+ total_loss = value.simple_value\n+ elif value.tag == \"iteration\":\n+ iteration = value.simple_value\n+ elif value.tag == \"my_text/text_summary\": # Append /text_summary to the key\n+ my_text = value.tensor.string_val[0].decode()\n+ os.chdir(oldpwd)\n+ self.assertAlmostEqual(total_loss, values[\"total_loss\"])\n+ self.assertEqual(iteration, values[\"iteration\"])\n+ self.assertEqual(my_text, values[\"my_text\"])\n+\n+\n+@mock.patch.dict(os.environ, {\"WANDB_MODE\": \"offline\"})\n+class WandBTrackingTest(unittest.TestCase):\n+ @staticmethod\n+ def get_value_from_log(key: str, log: str, key_occurance: int = 0):\n+ \"\"\"\n+ Parses wandb log for `key` and returns the value.\n+ If parsing through multiple calls to .log, pass in a `key_occurance`\n+ \"\"\"\n+ res = re.findall(rf\"(?<={key} )[^\\s]+\", log)[key_occurance]\n+ if '\"' in res:\n+ return re.findall(r'\"([^\"]*)\"', res)[0]\n+ else:\n+ return res\n+\n+ def test_init_trackers(self):\n+ project_name = \"test_project_with_config\"\n+ accelerator = Accelerator(log_with=\"wandb\")\n+ config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n+ accelerator.init_trackers(project_name, config)\n+ accelerator.end_training()\n+ # The latest offline log is stored at wandb/latest-run/*.wandb\n+ for child in Path(\"wandb/latest-run\").glob(\"*\"):\n+ logger.info(child)\n+ if child.is_file() and child.suffix == \".wandb\":\n+ with open(child, \"rb\") as f:\n+ content = f.read()\n+ break\n+\n+ # Check HPS through careful parsing and cleaning\n+ cleaned_log = re.sub(r\"[\\x00-\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\n+ self.assertEqual(self.get_value_from_log(\"num_iterations\", cleaned_log), \"12\")\n+ self.assertEqual(self.get_value_from_log(\"learning_rate\", cleaned_log), \"0.01\")\n+ self.assertEqual(self.get_value_from_log(\"some_boolean\", 
cleaned_log), \"false\")\n+ self.assertEqual(self.get_value_from_log(\"some_string\", cleaned_log), \"some_value\")\n+\n+ def test_log(self):\n+ project_name = \"test_project_with_log\"\n+ accelerator = Accelerator(log_with=\"wandb\")\n+ accelerator.init_trackers(project_name)\n+ values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n+ accelerator.log(values, step=0)\n+ accelerator.end_training()\n+ # The latest offline log is stored at wandb/latest-run/*.wandb\n+ for child in Path(\"wandb/latest-run\").glob(\"*\"):\n+ if child.is_file() and child.suffix == \".wandb\":\n+ with open(child, \"rb\") as f:\n+ content = f.read()\n+ break\n+ # Check HPS through careful parsing and cleaning\n+ cleaned_log = re.sub(r\"[\\x00-\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\n+ self.assertEqual(self.get_value_from_log(\"total_loss\", cleaned_log), \"0.1\")\n+ self.assertEqual(self.get_value_from_log(\"iteration\", cleaned_log), \"1\")\n+ self.assertEqual(self.get_value_from_log(\"my_text\", cleaned_log), \"some_value\")\n+ self.assertEqual(self.get_value_from_log(\"_step\", cleaned_log), \"0\")\n+\n+ def setUp(self):\n+ os.mkdir(\".wandb_tests\")\n+ os.chdir(\".wandb_tests\")\n+\n+ def tearDown(self):\n+ if os.getcwd().endswith(\".wandb_tests\"):\n+ os.chdir(\"..\")\n+ if os.path.exists(\".wandb_tests\"):\n+ shutil.rmtree(\".wandb_tests\")\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ if os.path.exists(\".wandb_tests\"):\n+ shutil.rmtree(\".wandb_tests\")\n", "code_comments": [ { "body": "```suggestion\r\nfrom .tracking import TensorBoardTracker\r\n```\r\n:scream: ", "diff_hunk": "@@ -17,10 +17,12 @@\n import sys\n import warnings\n from contextlib import contextmanager\n+from pathlib import Path\n from typing import List, Optional, Union\n \n import torch\n \n+from accelerate.accelerate.src.accelerate.tracking import TensorBoardTracker", "from_author": false }, { "body": "The `\"all\"` should be dealt with around here too.", "diff_hunk": "@@ -111,9 +115,17 @@ def __init__(\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n+ log_with: Optional[List[Union[LoggerType, str]]] = None,\n dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+ if log_with is not None:\n+ if not isinstance(log_with, list):", "from_author": false }, { "body": "Nice!", "diff_hunk": "@@ -556,6 +568,30 @@ def wait_for_everyone(self):\n \"\"\"\n wait_for_everyone()\n \n+ def init_trackers(self, project_name: str, config: dict = None):\n+ \"\"\"\n+ Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations\n+\n+ Args:\n+ project_name (`str`):\n+ \"\"\"\n+ project_location = Path(project_name)\n+ self.trackers = []\n+ for tracker in self.log_with:\n+ if tracker.lower() == \"tensorboard\" and is_tensorboard_available():\n+ self.trackers.append(TensorBoardTracker(project_location))\n+ if config is not None:\n+ for tracker in self.trackers:\n+ tracker.store_init_configuration(config)", "from_author": false }, { "body": "Should be inside an `is_tensorboard_available` flag.", "diff_hunk": "@@ -0,0 +1,72 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+\n+from torch.utils import tensorboard", "from_author": false }, { "body": "Accelerate has no idea about the global step though. We could add it in an optional argument to `log`? Or add something that keeps track of the number of time the `optimizer.step` is called internally.", "diff_hunk": "@@ -0,0 +1,72 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+\n+from torch.utils import tensorboard\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ log_directory = \"tensorboard\"\n+\n+ def __init__(self, run_name=\"\"):\n+ self.run_name = Path(run_name)\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values):\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ # add_scalar has the option for `global_step`, should we include this?", "from_author": false }, { "body": "That's more or less what my thinking is, having it be an optional argument. I don't think there's a need for us to keep track of `optimizer.step` calls internally for this yet, but we'll see what changes as this develops", "diff_hunk": "@@ -0,0 +1,72 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+\n+from torch.utils import tensorboard\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ log_directory = \"tensorboard\"\n+\n+ def __init__(self, run_name=\"\"):\n+ self.run_name = Path(run_name)\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values):\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ # add_scalar has the option for `global_step`, should we include this?", "from_author": true }, { "body": "*That is what we call VSCode not being friendly and me missing it!*", "diff_hunk": "@@ -17,10 +17,12 @@\n import sys\n import warnings\n from contextlib import contextmanager\n+from pathlib import Path\n from typing import List, Optional, Union\n \n import torch\n \n+from accelerate.accelerate.src.accelerate.tracking import TensorBoardTracker", "from_author": true }, { "body": "Decided to make it an optional argument. If folks seem to find it annoying then we can go ahead and track `optimizer.step` next", "diff_hunk": "@@ -0,0 +1,72 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+\n+from torch.utils import tensorboard\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ log_directory = \"tensorboard\"\n+\n+ def __init__(self, run_name=\"\"):\n+ self.run_name = Path(run_name)\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values):\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ # add_scalar has the option for `global_step`, should we include this?", "from_author": true }, { "body": "```suggestion\r\n values (Dictionary `str` to `bool`, `str`, `float` or `int`):\r\n Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, `str`,\r\n `float`, `int`, or `None`.\r\n```", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
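The global-step thread above settles on an optional `step` argument to `log` rather than counting `optimizer.step` calls internally. A minimal sketch of that calling pattern, using a hypothetical `DummyTracker` rather than the final library code:

```python
from typing import Optional


class DummyTracker:
    # Hypothetical tracker illustrating the optional `step` argument discussed above.
    def __init__(self):
        self.history = []

    def log(self, values: dict, step: Optional[int] = None):
        # When `step` is omitted, the backend is free to use its own counter.
        self.history.append((step, dict(values)))


tracker = DummyTracker()
for step, loss in enumerate([0.9, 0.5, 0.3]):
    # The caller supplies the global step explicitly.
    tracker.log({"train_loss": loss}, step=step)
```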
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs. Value be of type `bool`, `str`,\n+ `float`, `int`, or `None`.", "from_author": false }, { "body": "Seems to be optional and has a default ;-)", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):", "from_author": false }, { "body": "```suggestion\r\n step (`int`, *optional*):\r\n```", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs. Value be of type `bool`, `str`,\n+ `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs. Value be of type `int`, `float`, or `str`.\n+ step (`int`, Optional):", "from_author": false }, { "body": "Same comment as above for the `run_name` of Tensorboard.", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs. Value be of type `bool`, `str`,\n+ `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs. Value be of type `int`, `float`, or `str`.\n+ step (`int`, Optional):\n+ The run step. 
If included, the log will be affiliated with this step.\n+ \"\"\"\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ self.writer.add_scalar(k, v, global_step=step)\n+ elif isinstance(v, str):\n+ self.writer.add_text(k, v, global_step=step)\n+ self.writer.flush()\n+ logger.info(\"Successfully logged to TensorBoard\")\n+\n+\n+class WandBTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):", "from_author": false }, { "body": "Same comments as above.", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs. 
Value be of type `bool`, `str`,\n+ `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs. Value be of type `int`, `float`, or `str`.\n+ step (`int`, Optional):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ self.writer.add_scalar(k, v, global_step=step)\n+ elif isinstance(v, str):\n+ self.writer.add_text(k, v, global_step=step)\n+ self.writer.flush()\n+ logger.info(\"Successfully logged to TensorBoard\")\n+\n+\n+class WandBTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ \"\"\"\n+\n+ log_directory = None\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ wandb.init(self.run_name)\n+ logger.info(f\"Initialized WandB project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):", "from_author": false }, { "body": "And here too ;-)", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ 
pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs. Value be of type `bool`, `str`,\n+ `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs. Value be of type `int`, `float`, or `str`.\n+ step (`int`, Optional):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ self.writer.add_scalar(k, v, global_step=step)\n+ elif isinstance(v, str):\n+ self.writer.add_text(k, v, global_step=step)\n+ self.writer.flush()\n+ logger.info(\"Successfully logged to TensorBoard\")\n+\n+\n+class WandBTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ \"\"\"\n+\n+ log_directory = None\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ wandb.init(self.run_name)\n+ logger.info(f\"Initialized WandB project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs. Value be of type `bool`, `str`,\n+ `float`, `int`, or `None`.\n+ \"\"\"\n+ wandb.config(values)\n+ logger.info(\"Stored initial configuration hyperparameters to WandB\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs.\n+ step (`int`, Optional):\n+ The run step. If included, the log will be affiliated with this step.", "from_author": false }, { "body": "Same comments on the docstrings.", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs. Value be of type `bool`, `str`,\n+ `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs. Value be of type `int`, `float`, or `str`.\n+ step (`int`, Optional):\n+ The run step. 
If included, the log will be affiliated with this step.\n+ \"\"\"\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ self.writer.add_scalar(k, v, global_step=step)\n+ elif isinstance(v, str):\n+ self.writer.add_text(k, v, global_step=step)\n+ self.writer.flush()\n+ logger.info(\"Successfully logged to TensorBoard\")\n+\n+\n+class WandBTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ \"\"\"\n+\n+ log_directory = None\n+\n+ def __init__(self, run_name: str = \"\"):\n+ self.run_name = run_name\n+ wandb.init(self.run_name)\n+ logger.info(f\"Initialized WandB project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be stored as initial hyperparameters as key-value pairs. Value be of type `bool`, `str`,\n+ `float`, `int`, or `None`.\n+ \"\"\"\n+ wandb.config(values)\n+ logger.info(\"Stored initial configuration hyperparameters to WandB\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (`dict`):\n+ Values to be logged as key-value pairs.\n+ step (`int`, Optional):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ wandb.log(values, step=step)\n+ logger.info(\"Successfully logged to WandB\")\n+\n+\n+class CometMLTracker(GeneralTracker):\n+ \"\"\"", "from_author": false }, { "body": "You'll need to add the `tracking.` prefix to each for the links to work.", "diff_hunk": "@@ -0,0 +1,142 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Tracking\n+\n+There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex.\n+Accelerate provides a general tracking API that can be used to log useful items during your script through [`~Accelerator.log`]\n+\n+## Integrated Trackers\n+\n+Currently `Accelerate` supports three trackers out-of-the-box:\n+- [`~TensorBoardTracker`]\n+- [`~WandBTracker`]\n+- [`~CometMLTracker`]", "from_author": false }, { "body": "```suggestion\r\n+ run[\"logs/training/batch/loss\"].log(loss)\r\n```\r\n4 spaces please :-)", "diff_hunk": "@@ -0,0 +1,142 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. 
You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Tracking\n+\n+There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex.\n+Accelerate provides a general tracking API that can be used to log useful items during your script through [`~Accelerator.log`]\n+\n+## Integrated Trackers\n+\n+Currently `Accelerate` supports three trackers out-of-the-box:\n+- [`~TensorBoardTracker`]\n+- [`~WandBTracker`]\n+- [`~CometMLTracker`]\n+\n+To use any of them, pass in the selected type(s) to the `log_with` parameter in [`~Accelerate.__init__`]:\n+```python\n+from accelerate import Accelerate\n+from accelerate.utils import LoggerType\n+\n+accelerator = Accelerate(log_with=\"all\") # For all available trackers in the environment\n+accelerator = Accelerate(log_with=\"wandb\")\n+accelerator = Accelerate(log_with=[\"wandb\", LoggerType.TENSORBOARD])\n+```\n+\n+At the start of your experiment [`~Accelerator.init_tracker`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged:\n+```python\n+hps = {\"num_iterations\": 5, \"learning_rate\": 1e-2}\n+accelerator.init_trackers(\"my_project\", config=hps)\n+```\n+\n+When you are ready to log any data, [`~Accelerator.log`] should be used.\n+A `step` can also be passed in to correlate the data with a particular step in the training loop.\n+```python\n+accelerator.log({\"train_loss\": 1.12, \"valid_loss\": 0.8}, step=1)\n+```\n+\n+A full example is below:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator(log_with=\"all\")\n+config = {\n+ \"num_iterations\": 5,\n+ \"learning_rate\": 1e-2,\n+ \"loss_function\": str(my_loss_function),\n+}\n+\n+accelerator.init_trackers(\"example_project\", config=config)\n+\n+my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n+device = accelerator.device\n+my_model.to(device)\n+\n+for iteration in config[\"num_iterations\"]:\n+ for step, batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ my_optimizer.step()\n+ accelerator.log({\"training_loss\": loss}, step=step)\n+```\n+\n+[[autodoc]] tracking.TensorBoardTracker\n+\n+[[autodoc]] tracking.WandBTracker\n+\n+[[autodoc]] tracking.CometMLTracker\n+\n+\n+## Implementing Custom Trackers\n+\n+To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`~GeneralTracker`] class.\n+Every tracker must implement three functions:\n+ - `__init__`: Should store a `run_name` and initialize the tracker API of the integrated library\n+ - `store_init_configuration`: Should take in a `values` dictionary and store them as a one-time experiment configuration\n+ - `log`: Should take in a `values` dictionary and a `step`, and should log them to the run\n+As well as contain a `log_directory` attribute. 
If the integrated library does not store anything locally, should be set to `None`.\n+\n+A brief example can be seen below with an integration with Weights and Biases, containing only the relevent information:\n+```python\n+from accelerate.tracking import GeneralTracker\n+from typing import Optional\n+\n+import wandb\n+\n+\n+class WandBTracker(GeneralTracker):\n+ log_directory = None\n+\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ wandb.init(self.run_name)\n+\n+ def store_init_configuration(self, values: dict):\n+ wandb.config(values)\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ wandb.log(values, step=step)\n+```\n+\n+## When a wrapper cannot work\n+\n+If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:\n+```diff\n+from accelerate import Accelerator\n++ import neptune.new as neptune\n+\n+accelerator = Accelerator()\n++ run = neptune.init(...)\n+\n+my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n+device = accelerator.device\n+my_model.to(device)\n+\n+for iteration in config[\"num_iterations\"]:\n+ for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ total_loss += loss\n+ accelerator.backward(loss)\n+ my_optimizer.step()\n++ if accelerator.is_main_process:\n++ run[\"logs/training/batch/loss\"].log(loss)", "from_author": false }, { "body": "This call should be done at the Tracker level.", "diff_hunk": "@@ -556,6 +585,39 @@ def wait_for_everyone(self):\n \"\"\"\n wait_for_everyone()\n \n+ def init_trackers(self, project_name: str, config: Optional[dict] = None):\n+ \"\"\"\n+ Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations\n+\n+ Args:\n+ project_name (`str`):\n+ The name of the project. All trackers will save their data based on this\n+ config (`dict`, *optional*):\n+ Optional starting configuration to be logged.\n+ \"\"\"\n+ project_location = Path(project_name)", "from_author": false }, { "body": "The logging_dir should also be something that could be customized and default to the usual PyTorch default.", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)", "from_author": false }, { "body": "This variable doesn't seem used.", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")", "from_author": false }, { "body": "This doesn't seem used either.", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n+ `str`, `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (Dictionary `str` to `str`, `float`, or `int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ step (`int`, *optional*):\n+ The run step. 
If included, the log will be affiliated with this step.\n+ \"\"\"\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ self.writer.add_scalar(k, v, global_step=step)\n+ elif isinstance(v, str):\n+ self.writer.add_text(k, v, global_step=step)\n+ self.writer.flush()\n+ logger.info(\"Successfully logged to TensorBoard\")\n+\n+\n+class WandBTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ \"\"\"\n+\n+ log_directory = None", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)\n+ logger.info(f\"Initialized TensorBoard project {self.run_name} writing to {self.logging_dir}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n+ `str`, `float`, `int`, or `None`.\n+ \"\"\"\n+ self.writer.add_hparams(values, metric_dict={})\n+ self.writer.flush()\n+ logger.info(\"Stored initial configuration hyperparameters to TensorBoard\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (Dictionary `str` to `str`, `float`, or `int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ for k, v in values.items():\n+ if isinstance(v, (int, float)):\n+ self.writer.add_scalar(k, v, global_step=step)\n+ elif isinstance(v, str):\n+ self.writer.add_text(k, v, global_step=step)\n+ self.writer.flush()\n+ logger.info(\"Successfully logged to TensorBoard\")\n+\n+\n+class WandBTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ \"\"\"\n+\n+ log_directory = None\n+\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ wandb.init(self.run_name)\n+ logger.info(f\"Initialized WandB project {self.run_name}\")\n+ logger.info(\n+ \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n+ )\n+\n+ def store_init_configuration(self, values: dict):\n+ \"\"\"\n+ Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n+\n+ Args:\n+ values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n+ Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n+ `str`, `float`, `int`, or `None`.\n+ \"\"\"\n+ wandb.config(values)\n+ logger.info(\"Stored initial configuration hyperparameters to WandB\")\n+\n+ def log(self, values: dict, step: Optional[int] = None):\n+ \"\"\"\n+ Logs `values` to the current run.\n+\n+ Args:\n+ values (Dictionary `str` to `str`, `float`, or `int`):\n+ Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n+ step (`int`, *optional*):\n+ The run step. If included, the log will be affiliated with this step.\n+ \"\"\"\n+ wandb.log(values, step=step)\n+ logger.info(\"Successfully logged to WandB\")\n+\n+\n+class CometMLTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script.\n+\n+ API keys must be stored in a Comet config file.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run.\n+ \"\"\"\n+\n+ log_directory = None", "from_author": false }, { "body": "We should remove this from the abtract class as it's not required for all of the trackers.", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str", "from_author": false }, { "body": "`logging_dir` and `run_name` are actually the same when it comes to `tensorboard`, and we can't have both. So something like this won't work: \r\n```python\r\nwriterC = SummaryWriter(\"Testme\", comment=\"LR_0.1_BATCH_16\")\r\n```\r\nSo we can just remove this completely. ", "diff_hunk": "@@ -0,0 +1,210 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+# Expectation:\n+# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n+\n+import logging\n+from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n+from typing import Optional\n+\n+from .utils import LoggerType, is_comet_ml_available, is_tensorboard_available, is_wandb_available\n+\n+\n+_available_trackers = []\n+\n+if is_tensorboard_available():\n+ from torch.utils import tensorboard\n+\n+ _available_trackers.append(LoggerType.TENSORBOARD)\n+\n+if is_wandb_available():\n+ import wandb\n+\n+ _available_trackers.append(LoggerType.WANDB)\n+\n+if is_comet_ml_available():\n+ from comet_ml import Experiment\n+\n+ _available_trackers.append(LoggerType.COMETML)\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def get_available_trackers():\n+ \"Returns a list of all supported available trackers in the system\"\n+ return _available_trackers\n+\n+\n+class GeneralTracker(object, metaclass=ABCMeta):\n+ \"\"\"\n+ A base Tracker class to be used for all logging integration implementations.\n+ \"\"\"\n+\n+ log_directory: str\n+\n+ @abstractmethod\n+ def store_init_configuration(self, values):\n+ pass\n+\n+ @abstractmethod\n+ def log(self, values, step: Optional[int]):\n+ pass\n+\n+\n+class TensorBoardTracker(GeneralTracker):\n+ \"\"\"\n+ A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.\n+\n+ Args:\n+ run_name (`str`):\n+ The name of the experiment run. Logs are then stored in `tensorboard/{run_name}`\n+ \"\"\"\n+\n+ log_directory = Path(\"tensorboard\")\n+\n+ def __init__(self, run_name: str):\n+ self.run_name = run_name\n+ self.writer = tensorboard.SummaryWriter(self.logging_dir / self.run_name)", "from_author": true }, { "body": "Not following why we have to add TensoFlow here. Can't it be just `tensorboard`?", "diff_hunk": "@@ -21,6 +21,8 @@\n extras[\"test\"] = [\n \"pytest\",\n \"pytest-xdist\",\n+ \"tensorflow\",", "from_author": false }, { "body": "It would probably be best to use a temporary directory here, to avoid users running the tests locally getting this folder.", "diff_hunk": "@@ -0,0 +1,89 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import logging\n+import shutil\n+import unittest\n+from pathlib import Path\n+\n+# We use TF to parse the logs\n+import tensorflow as tf\n+from accelerate import Accelerator\n+from tensorboard.plugins.hparams import plugin_data_pb2\n+from tensorflow.core.util import event_pb2\n+from tensorflow.python.summary.summary_iterator import summary_iterator\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class TensorBoardTrackingTest(unittest.TestCase):\n+ @classmethod\n+ def teardown_class(cls):\n+ shutil.rmtree(\"test_project_with_config\")\n+ shutil.rmtree(\"test_project_with_log\")\n+\n+ def test_init_trackers(self):\n+ hps = None\n+ project_name = \"test_project_with_config\"", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -0,0 +1,89 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import logging\n+import shutil\n+import unittest\n+from pathlib import Path\n+\n+# We use TF to parse the logs\n+import tensorflow as tf\n+from accelerate import Accelerator\n+from tensorboard.plugins.hparams import plugin_data_pb2\n+from tensorflow.core.util import event_pb2\n+from tensorflow.python.summary.summary_iterator import summary_iterator\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class TensorBoardTrackingTest(unittest.TestCase):\n+ @classmethod\n+ def teardown_class(cls):\n+ shutil.rmtree(\"test_project_with_config\")\n+ shutil.rmtree(\"test_project_with_log\")\n+\n+ def test_init_trackers(self):\n+ hps = None\n+ project_name = \"test_project_with_config\"\n+ accelerator = Accelerator(log_with=\"tensorboard\")\n+ config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n+ accelerator.init_trackers(project_name, config)\n+ for child in Path(project_name).glob(\"*/**\"):\n+ log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]\n+ # The config log is stored one layer deeper in the logged directory\n+ # And names are randomly generated each time\n+ si = summary_iterator(str(log))\n+ # Pull HPS through careful parsing\n+ for event in si:\n+ for value in event.summary.value:\n+ proto_bytes = value.metadata.plugin_data.content\n+ plugin_data = plugin_data_pb2.HParamsPluginData.FromString(proto_bytes)\n+ if plugin_data.HasField(\"session_start_info\"):\n+ hps = dict(plugin_data.session_start_info.hparams)\n+\n+ self.assertTrue(isinstance(hps, 
dict))\n+ keys = list(hps.keys())\n+ keys.sort()\n+ self.assertEqual(keys, [\"learning_rate\", \"num_iterations\", \"some_boolean\", \"some_string\"])\n+ self.assertEqual(hps[\"num_iterations\"].number_value, 12)\n+ self.assertEqual(hps[\"learning_rate\"].number_value, 0.01)\n+ self.assertEqual(hps[\"some_boolean\"].bool_value, False)\n+ self.assertEqual(hps[\"some_string\"].string_value, \"some_value\")\n+\n+ def test_log(self):\n+ step = None\n+ project_name = \"test_project_with_log\"", "from_author": false }, { "body": "Is it why the TF dep is necessary? There is really no other way to load a TensorBoard log?", "diff_hunk": "@@ -0,0 +1,89 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import logging\n+import shutil\n+import unittest\n+from pathlib import Path\n+\n+# We use TF to parse the logs\n+import tensorflow as tf\n+from accelerate import Accelerator\n+from tensorboard.plugins.hparams import plugin_data_pb2\n+from tensorflow.core.util import event_pb2\n+from tensorflow.python.summary.summary_iterator import summary_iterator\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class TensorBoardTrackingTest(unittest.TestCase):\n+ @classmethod\n+ def teardown_class(cls):\n+ shutil.rmtree(\"test_project_with_config\")\n+ shutil.rmtree(\"test_project_with_log\")\n+\n+ def test_init_trackers(self):\n+ hps = None\n+ project_name = \"test_project_with_config\"\n+ accelerator = Accelerator(log_with=\"tensorboard\")\n+ config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n+ accelerator.init_trackers(project_name, config)\n+ for child in Path(project_name).glob(\"*/**\"):\n+ log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]\n+ # The config log is stored one layer deeper in the logged directory\n+ # And names are randomly generated each time\n+ si = summary_iterator(str(log))\n+ # Pull HPS through careful parsing\n+ for event in si:\n+ for value in event.summary.value:\n+ proto_bytes = value.metadata.plugin_data.content\n+ plugin_data = plugin_data_pb2.HParamsPluginData.FromString(proto_bytes)\n+ if plugin_data.HasField(\"session_start_info\"):\n+ hps = dict(plugin_data.session_start_info.hparams)\n+\n+ self.assertTrue(isinstance(hps, dict))\n+ keys = list(hps.keys())\n+ keys.sort()\n+ self.assertEqual(keys, [\"learning_rate\", \"num_iterations\", \"some_boolean\", \"some_string\"])\n+ self.assertEqual(hps[\"num_iterations\"].number_value, 12)\n+ self.assertEqual(hps[\"learning_rate\"].number_value, 0.01)\n+ self.assertEqual(hps[\"some_boolean\"].bool_value, False)\n+ self.assertEqual(hps[\"some_string\"].string_value, \"some_value\")\n+\n+ def test_log(self):\n+ step = None\n+ project_name = \"test_project_with_log\"\n+ accelerator = Accelerator(log_with=\"tensorboard\")\n+ accelerator.init_trackers(project_name)\n+ values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n+ accelerator.log(values, step=0)\n+ # 
Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord\n+ # Names are randomly generated each time\n+ log = list(filter(lambda x: x.is_file(), Path(project_name).iterdir()))[0]\n+ serialized_examples = tf.data.TFRecordDataset(log)", "from_author": false }, { "body": "Yup, sadly: https://github.com/tensorflow/tensorboard#how-can-i-export-data-from-tensorboard\r\n\r\nWe have to use `tf` to parse it back in. ", "diff_hunk": "@@ -0,0 +1,89 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import logging\n+import shutil\n+import unittest\n+from pathlib import Path\n+\n+# We use TF to parse the logs\n+import tensorflow as tf\n+from accelerate import Accelerator\n+from tensorboard.plugins.hparams import plugin_data_pb2\n+from tensorflow.core.util import event_pb2\n+from tensorflow.python.summary.summary_iterator import summary_iterator\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+class TensorBoardTrackingTest(unittest.TestCase):\n+ @classmethod\n+ def teardown_class(cls):\n+ shutil.rmtree(\"test_project_with_config\")\n+ shutil.rmtree(\"test_project_with_log\")\n+\n+ def test_init_trackers(self):\n+ hps = None\n+ project_name = \"test_project_with_config\"\n+ accelerator = Accelerator(log_with=\"tensorboard\")\n+ config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n+ accelerator.init_trackers(project_name, config)\n+ for child in Path(project_name).glob(\"*/**\"):\n+ log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]\n+ # The config log is stored one layer deeper in the logged directory\n+ # And names are randomly generated each time\n+ si = summary_iterator(str(log))\n+ # Pull HPS through careful parsing\n+ for event in si:\n+ for value in event.summary.value:\n+ proto_bytes = value.metadata.plugin_data.content\n+ plugin_data = plugin_data_pb2.HParamsPluginData.FromString(proto_bytes)\n+ if plugin_data.HasField(\"session_start_info\"):\n+ hps = dict(plugin_data.session_start_info.hparams)\n+\n+ self.assertTrue(isinstance(hps, dict))\n+ keys = list(hps.keys())\n+ keys.sort()\n+ self.assertEqual(keys, [\"learning_rate\", \"num_iterations\", \"some_boolean\", \"some_string\"])\n+ self.assertEqual(hps[\"num_iterations\"].number_value, 12)\n+ self.assertEqual(hps[\"learning_rate\"].number_value, 0.01)\n+ self.assertEqual(hps[\"some_boolean\"].bool_value, False)\n+ self.assertEqual(hps[\"some_string\"].string_value, \"some_value\")\n+\n+ def test_log(self):\n+ step = None\n+ project_name = \"test_project_with_log\"\n+ accelerator = Accelerator(log_with=\"tensorboard\")\n+ accelerator.init_trackers(project_name)\n+ values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n+ accelerator.log(values, step=0)\n+ # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord\n+ # Names are randomly generated each time\n+ log = list(filter(lambda x: x.is_file(), 
Path(project_name).iterdir()))[0]\n+ serialized_examples = tf.data.TFRecordDataset(log)", "from_author": true }, { "body": "```suggestion\r\n Should run any finalizing functions within the tracking API. If the API should not have one, just don't overwrite that method.\r\n```", "diff_hunk": "@@ -80,6 +80,16 @@ def log(self, values: dict, step: Optional[int]):\n \"\"\"\n pass\n \n+ @abstractmethod\n+ def finish(self):\n+ \"\"\"\n+ Should run any finalizing functions within the tracking API. If the API should not have one, just return:\n+ ```python\n+ super().finish()\n+ ```", "from_author": false }, { "body": "```suggestion\r\n```\r\nNo need to write something as calling the super method is then the default ;-)", "diff_hunk": "@@ -218,3 +242,7 @@ def log(self, values: dict, step: Optional[int] = None):\n self.writer.set_step(step)\n self.writer.log_others(values)\n logger.info(\"Successfully logged to CometML\")\n+\n+ def finish(self):\n+ \"\"\"Do nothing\"\"\"\n+ super().finish()", "from_author": false }, { "body": "I'd like to avoid this. The API should accept any logging dir for TensorBoard at least, no?", "diff_hunk": "@@ -29,29 +32,29 @@\n \n \n class TensorBoardTrackingTest(unittest.TestCase):\n- @classmethod\n- def teardown_class(cls):\n- shutil.rmtree(\"test_project_with_config\")\n- shutil.rmtree(\"test_project_with_log\")\n-\n def test_init_trackers(self):\n hps = None\n project_name = \"test_project_with_config\"\n- accelerator = Accelerator(log_with=\"tensorboard\")\n- config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n- accelerator.init_trackers(project_name, config)\n- for child in Path(project_name).glob(\"*/**\"):\n- log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]\n- # The config log is stored one layer deeper in the logged directory\n- # And names are randomly generated each time\n- si = summary_iterator(str(log))\n- # Pull HPS through careful parsing\n- for event in si:\n- for value in event.summary.value:\n- proto_bytes = value.metadata.plugin_data.content\n- plugin_data = plugin_data_pb2.HParamsPluginData.FromString(proto_bytes)\n- if plugin_data.HasField(\"session_start_info\"):\n- hps = dict(plugin_data.session_start_info.hparams)\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ oldpwd = os.getcwd()\n+ os.chdir(dirpath)", "from_author": false }, { "body": "Would be nice to have a `logging_dir` argument in the `Accelerator` I realize.", "diff_hunk": "@@ -64,26 +67,90 @@ def test_init_trackers(self):\n \n def test_log(self):\n step = None\n+ project_name = \"test_project_with_log\"\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ oldpwd = os.getcwd()\n+ os.chdir(dirpath)\n+ accelerator = Accelerator(log_with=\"tensorboard\")\n+ accelerator.init_trackers(project_name)\n+ values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n+ accelerator.log(values, step=0)\n+ accelerator.end_training()\n+ # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord\n+ # Names are randomly generated each time\n+ log = list(filter(lambda x: x.is_file(), Path(project_name).iterdir()))[0]\n+ serialized_examples = tf.data.TFRecordDataset(log)\n+ for e in serialized_examples:\n+ event = event_pb2.Event.FromString(e.numpy())\n+ if step is None:\n+ step = event.step\n+ for value in event.summary.value:\n+ if value.tag == \"total_loss\":\n+ total_loss = value.simple_value\n+ elif value.tag == \"iteration\":\n+ iteration = value.simple_value\n+ elif 
value.tag == \"my_text/text_summary\": # Append /text_summary to the key\n+ my_text = value.tensor.string_val[0].decode()\n+ os.chdir(oldpwd)\n+ self.assertAlmostEqual(total_loss, values[\"total_loss\"])\n+ self.assertEqual(iteration, values[\"iteration\"])\n+ self.assertEqual(my_text, values[\"my_text\"])\n+\n+\n+@mock.patch.dict(os.environ, {\"WANDB_MODE\": \"offline\"})\n+class WandBTrackingTest(unittest.TestCase):\n+ @staticmethod\n+ def get_value_from_log(key: str, log: str, key_occurance: int = 0):\n+ \"\"\"\n+ Parses wandb log for `key` and returns the value.\n+ If parsing through multiple calls to .log, pass in a `key_occurance`\n+ \"\"\"\n+ res = re.findall(rf\"(?<={key} )[^\\s]+\", log)[key_occurance]\n+ if '\"' in res:\n+ return re.findall(r'\"([^\"]*)\"', res)[0]\n+ else:\n+ return res\n+\n+ def test_init_trackers(self):\n+ project_name = \"test_project_with_config\"\n+ with tempfile.TemporaryDirectory() as dirpath:\n+ oldpwd = os.getcwd()\n+ os.chdir(dirpath)", "from_author": false }, { "body": "No need to do a separate job, they can all go in the same one!", "diff_hunk": "@@ -12,6 +12,19 @@ jobs:\n with:\n python-version: 3.6\n - name: Install Python dependencies\n- run: pip install -e .[test]\n+ run: pip install -e .[test,trackers]\n - name: Run Tests\n- run: make test\n\\ No newline at end of file\n+ run: make test\n+\n+ test-tf:", "from_author": false }, { "body": "We should mention the train end method in this example, since it's relevant for trackers.", "diff_hunk": "@@ -0,0 +1,137 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Tracking\n+\n+There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex.\n+Accelerate provides a general tracking API that can be used to log useful items during your script through [`~Accelerator.log`]\n+\n+## Integrated Trackers\n+\n+Currently `Accelerate` supports three trackers out-of-the-box:\n+\n+\n+[[autodoc]] tracking.TensorBoardTracker\n+\n+[[autodoc]] tracking.WandBTracker\n+\n+[[autodoc]] tracking.CometMLTracker\n+\n+To use any of them, pass in the selected type(s) to the `log_with` parameter in [`~Accelerate.__init__`]:\n+```python\n+from accelerate import Accelerate\n+from accelerate.utils import LoggerType\n+\n+accelerator = Accelerate(log_with=\"all\") # For all available trackers in the environment\n+accelerator = Accelerate(log_with=\"wandb\")\n+accelerator = Accelerate(log_with=[\"wandb\", LoggerType.TENSORBOARD])\n+```\n+\n+At the start of your experiment [`~Accelerator.init_tracker`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged:\n+```python\n+hps = {\"num_iterations\": 5, \"learning_rate\": 1e-2}\n+accelerator.init_trackers(\"my_project\", config=hps)\n+```\n+\n+When you are ready to log any data, [`~Accelerator.log`] should be used.\n+A `step` can also be passed in to correlate the data with a particular step in the training loop.\n+```python\n+accelerator.log({\"train_loss\": 1.12, \"valid_loss\": 0.8}, step=1)\n+```\n+\n+A full example is below:\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator(log_with=\"all\")\n+config = {\n+ \"num_iterations\": 5,\n+ \"learning_rate\": 1e-2,\n+ \"loss_function\": str(my_loss_function),\n+}\n+\n+accelerator.init_trackers(\"example_project\", config=config)\n+\n+my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n+device = accelerator.device\n+my_model.to(device)\n+\n+for iteration in config[\"num_iterations\"]:\n+ for step, batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ my_optimizer.step()\n+ accelerator.log({\"training_loss\": loss}, step=step)\n+```", "from_author": false }, { "body": "I would group those together.", "diff_hunk": "@@ -20,9 +20,11 @@\n extras[\"docs\"] = []\n extras[\"test\"] = [\n \"pytest\",\n- \"pytest-xdist\",\n+ \"pytest-xdist\"\n ]\n-extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n+extras[\"trackers\"] = [\"wandb\", \"comet-ml\"]\n+extras[\"test_trackers\"] = [\"tensorflow\"]", "from_author": false }, { "body": "```suggestion\r\nextras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\r\n```\r\nPlease let's not include all the trackers in dev ;-)", "diff_hunk": "@@ -20,9 +20,11 @@\n extras[\"docs\"] = []\n extras[\"test\"] = [\n \"pytest\",\n- \"pytest-xdist\",\n+ \"pytest-xdist\"\n ]\n-extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n+extras[\"trackers\"] = [\"wandb\", \"comet-ml\"]\n+extras[\"test_trackers\"] = [\"tensorflow\"]\n+extras[\"dev\"] = extras[\"quality\"] + extras[\"test\"] + extras[\"trackers\"]", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was 
closed or merged._", "from_author": false }, { "body": "Given https://github.com/huggingface/accelerate/issues/291#issuecomment-1080771134, this PR is feature-complete. Only thing missing is tests for TensorBoard and potential mocking, and other documentation. But otherwise it's ready for a review", "from_author": true }, { "body": "Last review, this time for the docs! πŸ˜„ πŸš€ ", "from_author": true }, { "body": "There's a weird issue when doing `pip install tensorflow` where it will just install a higher `setuptools`, and that setuptools will break importing pytorch. pinning it in the dev requirements \"fixes\" it. Need to play with it more, but a test is there. Once that's solved we probably should mimic something like this for the TF tests as well, since this goes through and looks at the actual values stored", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/293", "pr_id": 888735312 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 4f2a79bcd..2b623db34 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -30,6 +30,7 @@\n from .state import AcceleratorState, DistributedType, is_deepspeed_available\n from .utils import (\n DeepSpeedPlugin,\n+ PrecisionType,\n RNGType,\n convert_outputs_to_fp32,\n extract_model_from_parallel,\n@@ -106,19 +107,18 @@ def __init__(\n device_placement: bool = True,\n split_batches: bool = False,\n fp16: bool = None,\n- mixed_precision: str = None,\n+ mixed_precision: Union[PrecisionType, str] = None,\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n-\n if mixed_precision is not None:\n- mixed_precision = mixed_precision.lower()\n- if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ mixed_precision = str(mixed_precision)\n+ if mixed_precision not in PrecisionType:\n raise ValueError(\n- f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"\n+ f\"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}\"\n )\n \n if fp16:\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex ff1b148f4..dd946c85d 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -27,7 +27,7 @@\n from accelerate.commands.config import default_config_file, load_config_from_file\n from accelerate.commands.config.config_args import SageMakerConfig\n from accelerate.state import ComputeEnvironment, DistributedType\n-from accelerate.utils import PrepareForLaunch, is_sagemaker_available\n+from accelerate.utils import PrecisionType, PrepareForLaunch, is_sagemaker_available\n \n \n def launch_command_parser(subparsers=None):\n@@ -163,10 +163,12 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n-\n- mixed_precision = args.mixed_precision.lower()\n- if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n- raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\")\n+ try:\n+ mixed_precision = PrecisionType(args.mixed_precision.lower())\n+ except ValueError:\n+ raise ValueError(\n+ f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. 
Choose between {PrecisionType.list()}.\"\n+ )\n \n if args.fp16:\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n@@ -212,10 +214,12 @@ def multi_gpu_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- mixed_precision = args.mixed_precision.lower()\n-\n- if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n- raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\")\n+ try:\n+ mixed_precision = PrecisionType(args.mixed_precision.lower())\n+ except ValueError:\n+ raise ValueError(\n+ f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n+ )\n \n if args.fp16:\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n@@ -259,10 +263,12 @@ def deepspeed_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- mixed_precision = args.mixed_precision.lower()\n-\n- if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n- raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\")\n+ try:\n+ mixed_precision = PrecisionType(args.mixed_precision.lower())\n+ except ValueError:\n+ raise ValueError(\n+ f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n+ )\n \n if args.fp16:\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n@@ -388,10 +394,12 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n print(\"Converting Arguments to Hyperparameters\")\n hyperparameters = _convert_nargs_to_dict(args.training_script_args)\n \n- mixed_precision = args.mixed_precision.lower()\n-\n- if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n- raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\")\n+ try:\n+ mixed_precision = PrecisionType(args.mixed_precision.lower())\n+ except ValueError:\n+ raise ValueError(\n+ f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n+ )\n \n if args.fp16:\n warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex 48c4404dd..cfd8929d9 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -22,7 +22,7 @@\n from packaging import version\n \n from .state import AcceleratorState\n-from .utils import PrepareForLaunch, patch_environment\n+from .utils import PrecisionType, PrepareForLaunch, patch_environment\n \n \n def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\n@@ -107,10 +107,11 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n \"function.\"\n )\n \n- mixed_precision = mixed_precision.lower()\n- if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ try:\n+ mixed_precision = PrecisionType(mixed_precision.lower())\n+ except ValueError:\n raise ValueError(\n- f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"\n+ f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. 
Choose between {PrecisionType.list()}.\"\n )\n \n if use_fp16:\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 5b94d7fff..af9299a4f 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -18,7 +18,7 @@\n from collections.abc import Mapping\n from contextlib import contextmanager\n from dataclasses import dataclass, field\n-from enum import Enum\n+from enum import Enum, EnumMeta\n from functools import update_wrapper\n from typing import Any, List, Optional, Union\n \n@@ -51,7 +51,36 @@ def is_sagemaker_available():\n OPTIMIZER_NAME = \"optimizer\"\n \n \n-class RNGType(Enum):\n+class EnumWithContains(EnumMeta):\n+ \"A metaclass that adds the ability to check if `self` contains an item with the `in` operator\"\n+\n+ def __contains__(cls, item):\n+ try:\n+ cls(item)\n+ except ValueError:\n+ return False\n+ return True\n+\n+\n+class BaseEnum(Enum, metaclass=EnumWithContains):\n+ \"An enum class that can get the value of an item with `str(Enum.key)`\"\n+\n+ def __str__(self):\n+ return self.value\n+\n+ @classmethod\n+ def list(cls):\n+ \"Method to list all the possible items in `cls`\"\n+ return list(map(lambda item: str(item), cls))\n+\n+\n+class PrecisionType(BaseEnum):\n+ NO = \"no\"\n+ FP16 = \"fp16\"\n+ BF16 = \"bf16\"\n+\n+\n+class RNGType(BaseEnum):\n TORCH = \"torch\"\n CUDA = \"cuda\"\n XLA = \"xla\"\n", "code_comments": [ { "body": "Here I think you might need to convert this in a list for `choices` to work the proper way.", "diff_hunk": "@@ -58,7 +58,7 @@ def launch_command_parser(subparsers=None):\n \"--mixed_precision\",\n default=\"no\",\n type=str,\n- choices=[\"no\", \"fp16\", \"bf16\"],\n+ choices=PrecisionType,", "from_author": false }, { "body": "A small docstring here on what this adds to a regular `Enum` would be great :-)", "diff_hunk": "@@ -51,7 +51,26 @@ def is_sagemaker_available():\n OPTIMIZER_NAME = \"optimizer\"\n \n \n-class RNGType(Enum):\n+class MetaEnum(EnumMeta):\n+ def __contains__(cls, item):", "from_author": false }, { "body": "```suggestion\r\nclass EnumWithContains(EnumMeta):\r\n```\r\nnit: the name is not very descriptive on what the metaclass does, would switch to this.", "diff_hunk": "@@ -51,7 +51,36 @@ def is_sagemaker_available():\n OPTIMIZER_NAME = \"optimizer\"\n \n \n-class RNGType(Enum):\n+class MetaEnum(EnumMeta):", "from_author": false }, { "body": "```suggestion\r\nclass BaseEnum(Enum, metaclass=EnumWithContains):\r\n```\r\nTo go with the suggestion above.", "diff_hunk": "@@ -51,7 +51,36 @@ def is_sagemaker_available():\n OPTIMIZER_NAME = \"optimizer\"\n \n \n-class RNGType(Enum):\n+class MetaEnum(EnumMeta):\n+ \"A metaclass that adds the ability to check if `self` contains an item with the `in` operator\"\n+\n+ def __contains__(cls, item):\n+ try:\n+ cls(item)\n+ except ValueError:\n+ return False\n+ return True\n+\n+\n+class BaseEnum(Enum, metaclass=MetaEnum):", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/292", "pr_id": 888471361 }, { "diff": "diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nindex 20f963aae..8f266c600 100644\n--- a/docs/source/_toctree.yml\n+++ b/docs/source/_toctree.yml\n@@ -19,4 +19,6 @@\n title: Kwargs Handlers\n - local: internal\n title: Internals\n+ - local: checkpoint\n+ title: Checkpointing\n title: API Reference\ndiff --git a/docs/source/checkpoint.mdx b/docs/source/checkpoint.mdx\nnew 
file mode 100644\nindex 000000000..26ef21150\n--- /dev/null\n+++ b/docs/source/checkpoint.mdx\n@@ -0,0 +1,60 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Checkpointing\n+\n+When training a PyTorch model with Accelerate, you may often want to save and continue a state of training. Doing so requires\n+saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside Accelerate are two convience functions to achieve this quickly:\n+- Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location\n+- Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`\n+\n+It should be noted that the expectation is that those states come from the same training script, they should not be from two separate scripts.\n+\n+- By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions,\n+so long as the object has a `state_dict` **and** a `load_state_dict` functionality. This could include objects such as a learning rate scheduler. \n+\n+Below is a brief example using checkpointing to save and reload a state during training:\n+\n+```python\n+from accelerate import Accelerator\n+import torch\n+\n+accelerator = Accelerator()\n+\n+my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)\n+my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)\n+\n+# Register the LR scheduler\n+accelerate.register_for_checkpointing(my_scheduler)\n+\n+# Save the starting state\n+accelerate.save_state(\"my/save/path\")\n+\n+device = accelerator.device\n+my_model.to(device)\n+\n+# Perform training\n+for epoch in range(num_epochs):\n+ for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ accelerator.backward(loss)\n+ my_optimizer.step()\n+ my_scheduler.step()\n+\n+# Restore previous state\n+accelerate.load_state(\"my/save/path\")\n+```\n\\ No newline at end of file\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nindex f16047925..b6f539fd0 100644\n--- a/docs/source/quicktour.mdx\n+++ b/docs/source/quicktour.mdx\n@@ -340,6 +340,16 @@ unwrapped_model.load_state_dict(torch.load(filename))\n \n Note that since all the model parameters are references to tensors, this will load your weights inside `model`.\n \n+## Saving/loading entire states\n+\n+When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially LR schedulers to be restored in the _same script_.\n+You can use `accelerator.save_state` and `accelerator.load_state` respectively to do so, just by simply passing in a save location. 
\n+If you have registered any other stateful items to be stored through `accelerator.register_for_checkpointing` they will also be saved and/or loaded.\n+<Tip>\n+ Every object passed to `register_for_checkpointing` must have a `load_state_dict` and `save_dict` function to be stored\n+</Tip>\n+\n+\n ### Gradient clipping\n \n If you are using gradient clipping in your script, you should replace the calls to\n", "code_comments": [ { "body": "```suggestion\r\nIt should be noted that the expectation is that those states come from the same training script, they should not be from two separate scripts.\r\n```", "diff_hunk": "@@ -0,0 +1,25 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Checkpointing\n+\n+When training a PyTorch model with Accelerate, you may often want to save and continue a state of training. Doing so requires\n+saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside Accelerate are two convience functions to achieve this quickly:\n+- Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location\n+- Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`\n+\n+It should be noted that these states _must_ be from the same contiguous training script, they should not be from two seperate runs or scripts. ", "from_author": false }, { "body": "There is no object in the lib called `Checkpointing`, so this won't work. this is where you should show a full example.", "diff_hunk": "@@ -0,0 +1,25 @@\n+<!--Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Checkpointing\n+\n+When training a PyTorch model with Accelerate, you may often want to save and continue a state of training. Doing so requires\n+saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside Accelerate are two convience functions to achieve this quickly:\n+- Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location\n+- Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`\n+\n+It should be noted that these states _must_ be from the same contiguous training script, they should not be from two seperate runs or scripts. 
\n+\n+- By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions,\n+so long as the object has a `state_dict` **and** a `load_state_dict` functionality. This could include objects such as a learning rate scheduler. \n+\n+[[autodoc]] Checkpointing", "from_author": false } ], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/290", "pr_id": 886428060 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex fabd3e9fc..f01d06b98 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -14,6 +14,7 @@\n \n import gc\n import os\n+import sys\n import warnings\n from contextlib import contextmanager\n from typing import List, Optional, Union\n@@ -705,7 +706,7 @@ def autocast(self):\n \n autocast_context.__enter__()\n yield\n- autocast_context.__exit__()\n+ autocast_context.__exit__(*sys.exc_info())\n else:\n yield\n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/284", "pr_id": 881527884 }, { "diff": "diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md\nindex 66b0a8538..3073b3381 100644\n--- a/CONTRIBUTING.md\n+++ b/CONTRIBUTING.md\n@@ -26,7 +26,7 @@ on the awesome projects it made possible, shout out on Twitter every time it has\n helped you, or simply star the repo to say \"thank you\".\n \n Whichever way you choose to contribute, please be mindful to respect our\n-[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+[code of conduct](https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md).\n \n ## You can contribute in so many ways!\n \n@@ -118,7 +118,7 @@ Follow these steps to start contributing:\n $ git checkout -b a-descriptive-name-for-my-changes\n ```\n \n- **Do not** work on the `master` branch.\n+ **Do not** work on the `main` branch.\n \n 4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:\n \n@@ -184,7 +184,7 @@ Follow these steps to start contributing:\n \n ```bash\n $ git fetch upstream\n- $ git rebase upstream/master\n+ $ git rebase upstream/main\n ```\n \n Push the changes to your account using:\n@@ -220,7 +220,7 @@ See an example of a good PR here: https://github.com/huggingface/accelerate/pull\n ### Tests\n \n An extensive test suite is included to test the library behavior and several examples. Library tests can be found in\n-the [tests folder](https://github.com/huggingface/accelerate/tree/master/tests).\n+the [tests folder](https://github.com/huggingface/accelerate/tree/main/tests).\n \n We use `pytest` in order to run the tests. 
From the root of the\n repository, here's how to run tests with `pytest` for the library:\ndiff --git a/README.md b/README.md\nindex c27131808..3e21a3ae1 100644\n--- a/README.md\n+++ b/README.md\n@@ -26,7 +26,7 @@ limitations under the License.\n <img alt=\"Build\" src=\"https://img.shields.io/circleci/build/github/huggingface/transformers/master\">\n </a>\n -->\n- <a href=\"https://github.com/huggingface/accelerate/blob/master/LICENSE\">\n+ <a href=\"https://github.com/huggingface/accelerate/blob/main/LICENSE\">\n <img alt=\"License\" src=\"https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue\">\n </a>\n <a href=\"https://huggingface.co/docs/accelerate/index.html\">\n@@ -35,7 +35,7 @@ limitations under the License.\n <a href=\"https://github.com/huggingface/accelerate/releases\">\n <img alt=\"GitHub release\" src=\"https://img.shields.io/github/release/huggingface/accelerate.svg\">\n </a>\n- <a href=\"https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md\">\n+ <a href=\"https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md\">\n <img alt=\"Contributor Covenant\" src=\"https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg\">\n </a>\n </p>\ndiff --git a/docs/source/installation.mdx b/docs/source/installation.mdx\nindex 4564ba00c..be8394af7 100644\n--- a/docs/source/installation.mdx\n+++ b/docs/source/installation.mdx\n@@ -55,9 +55,9 @@ Here is how to quickly install `accelerate` from source:\n pip install git+https://github.com/huggingface/accelerate\n ```\n \n-Note that this will install not the latest released version, but the bleeding edge `master` version, which you may want to use in case a bug has been fixed since the last official release and a new release hasn't been yet rolled out.\n+Note that this will install not the latest released version, but the bleeding edge `main` version, which you may want to use in case a bug has been fixed since the last official release and a new release hasn't been yet rolled out.\n \n-While we strive to keep `master` operational at all times, if you notice some issues, they usually get fixed within a few hours or a day and and you're more than welcome to help us detect any problems by opening an [Issue](https://github.com/huggingface/accelerate/issues) and this way, things will get fixed even sooner.\n+While we strive to keep `main` operational at all times, if you notice some issues, they usually get fixed within a few hours or a day and and you're more than welcome to help us detect any problems by opening an [Issue](https://github.com/huggingface/accelerate/issues) and this way, things will get fixed even sooner.\n \n Again, you can run:\n \n@@ -69,7 +69,7 @@ to check πŸ€— Accelerate is properly installed.\n \n ## Editable install\n \n-If you want to constantly use the bleeding edge `master` version of the source code, or if you want to contribute to the library and need to test the changes in the code you're making, you will need an editable install. This is done by cloning the repository and installing with the following commands:\n+If you want to constantly use the bleeding edge `main` version of the source code, or if you want to contribute to the library and need to test the changes in the code you're making, you will need an editable install. 
This is done by cloning the repository and installing with the following commands:\n \n ``` bash\n git clone https://github.com/huggingface/accelerate.git\n@@ -85,7 +85,7 @@ now this editable install will reside where you clone the folder to, e.g. `~/acc\n \n Do note that you have to keep that `accelerate` folder around and not delete it to continue using the πŸ€— Accelerate library.\n \n-Now, let's get to the real benefit of this installation approach. Say, you saw some new feature has been just committed into `master`. If you have already performed all the steps above, to update your accelerate repo to include all the latest commits, all you need to do is to `cd` into that cloned repository folder and update the clone to the latest version:\n+Now, let's get to the real benefit of this installation approach. Say, you saw some new feature has been just committed into `main`. If you have already performed all the steps above, to update your accelerate repo to include all the latest commits, all you need to do is to `cd` into that cloned repository folder and update the clone to the latest version:\n \n ```bash\n cd ~/accelerate/\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/282", "pr_id": 879255432 }, { "diff": "diff --git a/src/accelerate/commands/accelerate_cli.py b/src/accelerate/commands/accelerate_cli.py\nindex e73ccd74c..6300c7c1d 100644\n--- a/src/accelerate/commands/accelerate_cli.py\n+++ b/src/accelerate/commands/accelerate_cli.py\n@@ -17,6 +17,7 @@\n from argparse import ArgumentParser\n \n from accelerate.commands.config import config_command_parser\n+from accelerate.commands.env import env_command_parser\n from accelerate.commands.launch import launch_command_parser\n from accelerate.commands.test import test_command_parser\n \n@@ -29,6 +30,7 @@ def main():\n config_command_parser(subparsers=subparsers)\n launch_command_parser(subparsers=subparsers)\n test_command_parser(subparsers=subparsers)\n+ env_command_parser(subparsers=subparsers)\n \n # Let's go\n args = parser.parse_args()\ndiff --git a/src/accelerate/commands/env.py b/src/accelerate/commands/env.py\nnew file mode 100644\nindex 000000000..b66008e1b\n--- /dev/null\n+++ b/src/accelerate/commands/env.py\n@@ -0,0 +1,68 @@\n+import argparse\n+import os\n+import platform\n+\n+import numpy as np\n+import torch\n+\n+from accelerate import __version__ as version\n+from accelerate.commands.config import default_config_file, load_config_from_file\n+\n+\n+def env_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"env\")\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate env command\")\n+\n+ parser.add_argument(\n+ \"--config_file\", default=None, help=\"The config file to use for the default values in the launching script.\"\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=env_command)\n+ return parser\n+\n+\n+def env_command(args):\n+ pt_version = torch.__version__\n+ pt_cuda_available = torch.cuda.is_available()\n+\n+ accelerate_config = \"Not found\"\n+ # Get the default from the config file.\n+ if args.config_file is not None or os.path.isfile(default_config_file):\n+ accelerate_config = load_config_from_file(args.config_file).to_dict()\n+\n+ info = {\n+ \"`Accelerate` version\": version,\n+ \"Platform\": platform.platform(),\n+ \"Python version\": platform.python_version(),\n+ \"Numpy 
version\": np.__version__,\n+ \"PyTorch version (GPU?)\": f\"{pt_version} ({pt_cuda_available})\",\n+ }\n+\n+ print(\"\\nCopy-and-paste the text below in your GitHub issue\\n\")\n+ print(\"\\n\".join([f\"- {prop}: {val}\" for prop, val in info.items()]))\n+\n+ print(\"- `Accelerate` default config:\" if args.config_file is None else \"- `Accelerate` config passed:\")\n+ accelerate_config_str = (\n+ \"\\n\".join([f\"\\t- {prop}: {val}\" for prop, val in accelerate_config.items()])\n+ if isinstance(accelerate_config, dict)\n+ else f\"\\t{accelerate_config}\"\n+ )\n+ print(accelerate_config_str)\n+\n+ info[\"`Accelerate` configs\"] = accelerate_config\n+\n+ return info\n+\n+\n+def main() -> int:\n+ parser = env_command_parser()\n+ args = parser.parse_args()\n+ env_command(args)\n+ return 0\n+\n+\n+if __name__ == \"__main__\":\n+ raise SystemExit(main())\n", "code_comments": [ { "body": "Note that the command submodules will import `accelerate.state` (via the lancuh command) so the code will fail if PyTorch is not installed anyway (which shouldn't be the case since it's a dependency).\r\n\r\nI'd remove that import and the test below that uses it.", "diff_hunk": "@@ -0,0 +1,51 @@\n+import argparse\n+import platform\n+\n+import numpy as np\n+\n+from accelerate import __version__ as version\n+from accelerate.file_utils import is_torch_available", "from_author": false }, { "body": "Maybe we could add the content of the default config to this info? It can be done in a followup PR if you don't have time to add it right now.", "diff_hunk": "@@ -0,0 +1,51 @@\n+import argparse\n+import platform\n+\n+import numpy as np\n+\n+from accelerate import __version__ as version\n+from accelerate.file_utils import is_torch_available\n+\n+\n+def env_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"env\")\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate env command\")\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=env_command)\n+ return parser\n+\n+\n+def env_command(args):\n+ pt_version = \"not installed\"\n+ pt_cuda_available = \"NA\"\n+ if is_torch_available():\n+ import torch\n+\n+ pt_version = torch.__version__\n+ pt_cuda_available = torch.cuda.is_available()\n+\n+ info = {\n+ \"`accelerate` version\": version,\n+ \"Platform\": platform.platform(),\n+ \"Python version\": platform.python_version(),\n+ \"Numpy version\": np.__version__,\n+ \"PyTorch version (GPU?)\": f\"{pt_version} ({pt_cuda_available})\",\n+ }", "from_author": false }, { "body": "As discussed above, not need to add that new file.", "diff_hunk": "@@ -0,0 +1,19 @@\n+import importlib.util", "from_author": false }, { "body": "This shouldn't be necessary either", "diff_hunk": "@@ -30,6 +31,14 @@\n from .state import AcceleratorState, DistributedType, is_deepspeed_available, is_tpu_available\n \n \n+# The package importlib_metadata is in a different place, depending on the python version.\n+if sys.version_info < (3, 8):\n+ import importlib_metadata # noqa: F401\n+\n+else:\n+ import importlib.metadata as importlib_metadata # noqa: F401", "from_author": false }, { "body": "I don't understand, default config for what?", "diff_hunk": "@@ -0,0 +1,51 @@\n+import argparse\n+import platform\n+\n+import numpy as np\n+\n+from accelerate import __version__ as version\n+from accelerate.file_utils import is_torch_available\n+\n+\n+def env_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"env\")\n+ else:\n+ parser = 
argparse.ArgumentParser(\"Accelerate env command\")\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=env_command)\n+ return parser\n+\n+\n+def env_command(args):\n+ pt_version = \"not installed\"\n+ pt_cuda_available = \"NA\"\n+ if is_torch_available():\n+ import torch\n+\n+ pt_version = torch.__version__\n+ pt_cuda_available = torch.cuda.is_available()\n+\n+ info = {\n+ \"`accelerate` version\": version,\n+ \"Platform\": platform.platform(),\n+ \"Python version\": platform.python_version(),\n+ \"Numpy version\": np.__version__,\n+ \"PyTorch version (GPU?)\": f\"{pt_version} ({pt_cuda_available})\",\n+ }", "from_author": true }, { "body": "The default config of Accelerate in `default_config.yaml`", "diff_hunk": "@@ -0,0 +1,51 @@\n+import argparse\n+import platform\n+\n+import numpy as np\n+\n+from accelerate import __version__ as version\n+from accelerate.file_utils import is_torch_available\n+\n+\n+def env_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"env\")\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate env command\")\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=env_command)\n+ return parser\n+\n+\n+def env_command(args):\n+ pt_version = \"not installed\"\n+ pt_cuda_available = \"NA\"\n+ if is_torch_available():\n+ import torch\n+\n+ pt_version = torch.__version__\n+ pt_cuda_available = torch.cuda.is_available()\n+\n+ info = {\n+ \"`accelerate` version\": version,\n+ \"Platform\": platform.platform(),\n+ \"Python version\": platform.python_version(),\n+ \"Numpy version\": np.__version__,\n+ \"PyTorch version (GPU?)\": f\"{pt_version} ({pt_cuda_available})\",\n+ }", "from_author": false }, { "body": "Okay, got it, I'll add this", "diff_hunk": "@@ -0,0 +1,51 @@\n+import argparse\n+import platform\n+\n+import numpy as np\n+\n+from accelerate import __version__ as version\n+from accelerate.file_utils import is_torch_available\n+\n+\n+def env_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"env\")\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate env command\")\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=env_command)\n+ return parser\n+\n+\n+def env_command(args):\n+ pt_version = \"not installed\"\n+ pt_cuda_available = \"NA\"\n+ if is_torch_available():\n+ import torch\n+\n+ pt_version = torch.__version__\n+ pt_cuda_available = torch.cuda.is_available()\n+\n+ info = {\n+ \"`accelerate` version\": version,\n+ \"Platform\": platform.platform(),\n+ \"Python version\": platform.python_version(),\n+ \"Numpy version\": np.__version__,\n+ \"PyTorch version (GPU?)\": f\"{pt_version} ({pt_cuda_available})\",\n+ }", "from_author": true }, { "body": "```suggestion\r\n print(\"- `Accelerate` default config:\" if args.config_file is None else \"- `Accelerate` config passed:\")\r\n```\r\nLet's just put a different print depending on whether we have the default config or a custom one.", "diff_hunk": "@@ -0,0 +1,68 @@\n+import argparse\n+import os\n+import platform\n+\n+import numpy as np\n+import torch\n+\n+from accelerate import __version__ as version\n+from accelerate.commands.config import default_config_file, load_config_from_file\n+\n+\n+def env_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"env\")\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate env command\")\n+\n+ parser.add_argument(\n+ \"--config_file\", default=None, help=\"The config file to use for the default values in 
the launching script.\"\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=env_command)\n+ return parser\n+\n+\n+def env_command(args):\n+ pt_version = torch.__version__\n+ pt_cuda_available = torch.cuda.is_available()\n+\n+ accelerate_config = \"Not found\"\n+ # Get the default from the config file.\n+ if args.config_file is not None or os.path.isfile(default_config_file):\n+ accelerate_config = load_config_from_file(args.config_file).to_dict()\n+\n+ info = {\n+ \"`Accelerate` version\": version,\n+ \"Platform\": platform.platform(),\n+ \"Python version\": platform.python_version(),\n+ \"Numpy version\": np.__version__,\n+ \"PyTorch version (GPU?)\": f\"{pt_version} ({pt_cuda_available})\",\n+ }\n+\n+ print(\"\\nCopy-and-paste the text below in your GitHub issue\\n\")\n+ print(\"\\n\".join([f\"- {prop}: {val}\" for prop, val in info.items()]))\n+\n+ print(\"- `Accelerate` configs:\")", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/280", "pr_id": 878188812 }, { "diff": "diff --git a/.github/workflows/build_dev_documentation.yml b/.github/workflows/build_dev_documentation.yml\ndeleted file mode 100644\nindex 627631047..000000000\n--- a/.github/workflows/build_dev_documentation.yml\n+++ /dev/null\n@@ -1,108 +0,0 @@\n-name: Build dev documentation\n-\n-on:\n- pull_request:\n-\n-concurrency:\n- group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n- cancel-in-progress: true\n-\n-jobs:\n- build_and_package:\n- runs-on: ubuntu-latest\n- container:\n- image: huggingface/transformers-doc-builder\n- env:\n- COMMIT_SHA: ${{ github.event.pull_request.head.sha }}\n- PR_NUMBER: ${{ github.event.number }}\n- EVENT_CONTEXT: ${{ toJSON(github.event) }}\n-\n- steps:\n- - uses: actions/checkout@v2\n- with:\n- repository: 'huggingface/doc-builder'\n- path: doc-builder\n-\n- - uses: actions/checkout@v2\n- with:\n- repository: 'huggingface/accelerate'\n- path: accelerate\n-\n- - uses: actions/setup-node@v2\n- with:\n- node-version: '16'\n-\n- - name: Set env\n- run: |\n- echo \"WRITE=$(echo 'ghp_'$(wget -qO- lysand.re/doc-build-dev)'bm')\" >> $GITHUB_ENV\n-\n- - name: Setup environment\n- run: |\n- rm -rf doc-build-dev\n- git clone --depth 1 https://HuggingFaceDocBuilderDev:${{ env.WRITE }}@github.com/huggingface/doc-build-dev\n- \n- pip uninstall -y doc-builder\n- cd doc-builder\n- git pull origin main\n- pip install -e .\n- cd ..\n- \n- cd accelerate\n- pip install .[quality]\n- cd ..\n-\n- - name: Setup git\n- run: |\n- git config --global user.name \"Hugging Face Doc Builder\"\n- git config --global user.email docs@huggingface.co\n-\n-\n- - name: Comment PR\n- uses: thollander/actions-comment-pull-request@v1\n- if: github.event.action == 'opened'\n-\n- with:\n- message: 'The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_${{ env.PR_NUMBER }}). 
All of your documentation changes will be reflected on that endpoint.'\n- GITHUB_TOKEN: ${{ env.WRITE }}\n-\n- - name: Find Comment\n- if: github.event.action == 'reopened'\n- uses: peter-evans/find-comment@v1\n- id: fc\n- with:\n- issue-number: ${{ env.PR_NUMBER }}\n- comment-author: HuggingFaceDocBuilderDev\n-\n- - name: Update comment\n- if: github.event.action == 'reopened'\n- uses: peter-evans/create-or-update-comment@v1\n- with:\n- comment-id: ${{ steps.fc.outputs.comment-id }}\n- token: ${{ env.WRITE }}\n- edit-mode: replace\n- body: |\n- The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/transformers/pr_${{ env.PR_NUMBER }}). All of your documentation changes will be reflected on that endpoint.\n-\n- - name: Make documentation\n- env:\n- NODE_OPTIONS: --max-old-space-size=6656\n- run: |\n- cd doc-build-dev && git pull\n- cd ../doc-builder\n- doc-builder build accelerate ../accelerate/docs/source --build_dir ../doc-build-dev --clean --version pr_$PR_NUMBER --html\n-\n- - name: Push to repositories\n- run: |\n- cd doc-build-dev\n- ls\n- git status\n-\n- if [[ `git status --porcelain` ]]; then\n- git add .\n- git stash && git pull && git stash apply\n- git commit -m \"Updated with commit $COMMIT_SHA See: https://github.com/huggingface/accelerate/commit/$COMMIT_SHA\"\n- git push origin main\n- else\n- echo \"No diff in the documentation.\"\n- fi\n- shell: bash\n\\ No newline at end of file\ndiff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml\nindex 9ed1c4887..082ece25e 100644\n--- a/.github/workflows/build_documentation.yml\n+++ b/.github/workflows/build_documentation.yml\n@@ -8,87 +8,10 @@ on:\n - v*-release\n \n jobs:\n- build_and_package:\n- runs-on: ubuntu-latest\n- defaults:\n- run:\n- shell: bash -l {0}\n-\n- steps:\n- - uses: actions/setup-node@v2\n- with:\n- node-version: '16'\n-\n- - uses: actions/checkout@v2\n- with:\n- repository: 'huggingface/doc-builder'\n- path: doc-builder\n-\n- - uses: actions/checkout@v2\n- with:\n- repository: 'huggingface/doc-build'\n- path: doc-build\n- token: ${{ secrets.HUGGINGFACE_PUSH }}\n- \n- - uses: actions/checkout@v2\n- with:\n- repository: 'huggingface/accelerate'\n- path: accelerate\n- \n- - name: Loading cache.\n- uses: actions/cache@v2\n- id: cache\n- with:\n- path: ~/.cache/pip\n- key: v1-test_build_doc\n- restore-keys: |\n- v1-test_build_doc-${{ hashFiles('setup.py') }}\n- v1-test_build_doc\n-\n- - name: Setup environment\n- run: |\n- pip install git+https://github.com/huggingface/doc-builder\n- cd accelerate\n- pip install .[quality]\n- cd ..\n-\n- - name: Setup git\n- run: |\n- git config --global user.name \"Hugging Face Doc Builder\"\n- git config --global user.email docs@huggingface.co\n- \n- - name: Create build directory\n- run: |\n- cd doc-build\n- git pull\n- cd ..\n- mkdir build_dir\n- mkdir build_dir/accelerate\n- cp doc-build/accelerate/_versions.yml build_dir/accelerate\n-\n- - name: Make documentation\n- run: |\n- cd doc-builder &&\n- doc-builder build accelerate ../accelerate/docs/source --build_dir ../build_dir --clean --html &&\n- cd ..\n- env:\n- NODE_OPTIONS: --max-old-space-size=6656\n-\n- - name: Push to repositories\n- run: |\n- cd doc-build\n- git pull\n- mv ../build_dir/accelerate/_versions.yml accelerate/\n- rm -rf accelerate/$(ls ../build_dir/accelerate)\n- mv ../build_dir/accelerate/$(ls ../build_dir/accelerate) accelerate/$(ls ../build_dir/accelerate)\n- git status\n-\n- if [[ `git status --porcelain` ]]; then \n- git add .\n- git commit 
-m \"Updated with commit ${{ github.sha }} \\n\\nSee: https://github.com/huggingface/accelerate/commit/${{ github.sha }}\"\n- git push origin main\n- else\n- echo \"No diff in the documentation.\"\n- fi\n-\n- cd ..\n+ build:\n+ uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main\n+ with:\n+ commit_sha: ${{ github.sha }}\n+ package: accelerate\n+ secrets:\n+ token: ${{ secrets.HUGGINGFACE_PUSH }}\ndiff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml\nnew file mode 100644\nindex 000000000..dc56751c6\n--- /dev/null\n+++ b/.github/workflows/build_pr_documentation.yml\n@@ -0,0 +1,16 @@\n+name: Build PR Documentation\n+\n+on:\n+ pull_request:\n+\n+concurrency:\n+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n+ cancel-in-progress: true\n+\n+jobs:\n+ build:\n+ uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main\n+ with:\n+ commit_sha: ${{ github.event.pull_request.head.sha }}\n+ pr_number: ${{ github.event.number }}\n+ package: accelerate\ndiff --git a/.github/workflows/delete_dev_documentation.yml b/.github/workflows/delete_dev_documentation.yml\ndeleted file mode 100644\nindex ea2f055b6..000000000\n--- a/.github/workflows/delete_dev_documentation.yml\n+++ /dev/null\n@@ -1,63 +0,0 @@\n-name: Delete dev documentation\n-\n-on:\n- pull_request:\n- types: [ closed ]\n-\n-\n-jobs:\n- build_and_package:\n- runs-on: ubuntu-latest\n- container:\n- image: huggingface/transformers-doc-builder\n-\n- env:\n- PR_NUMBER: ${{ github.event.number }}\n-\n- steps:\n- - name: Set env\n- run: |\n- echo \"WRITE=$(echo 'ghp_'$(wget -qO- lysand.re/doc-build-dev)'bm')\" >> $GITHUB_ENV\n- \n- - name: Setup environment\n- run: |\n- rm -rf doc-build-dev\n- git clone --depth 1 https://HuggingFaceDocBuilderDev:${{ env.WRITE }}@github.com/huggingface/doc-build-dev\n-\n- - name: Setup git\n- run: |\n- git config --global user.name \"Hugging Face Doc Builder\"\n- git config --global user.email docs@huggingface.co\n-\n- - name: Push to repositories\n- run: |\n- cd doc-build-dev\n- rm -rf accelerate/pr_$PR_NUMBER\n- ls\n- git status\n- if [[ `git status --porcelain` ]]; then\n- git add .\n- git commit -m \"Closed PR $PR_NUMBER\"\n- git push origin main\n- else\n- echo \"Branch was already deleted, nothing to do.\"\n- fi\n- shell: bash\n-\n- - name: Find Comment\n- if: ${{ always() }}\n- uses: peter-evans/find-comment@v1\n- id: fc\n- with:\n- issue-number: ${{ env.PR_NUMBER }}\n- comment-author: HuggingFaceDocBuilderDev\n-\n- - name: Update comment\n- if: ${{ always() }}\n- uses: peter-evans/create-or-update-comment@v1\n- with:\n- comment-id: ${{ steps.fc.outputs.comment-id }}\n- token: ${{ env.WRITE }}\n- edit-mode: replace\n- body: |\n- _The documentation is not available anymore as the PR was closed or merged._\ndiff --git a/.github/workflows/delete_doc_comment.yml b/.github/workflows/delete_doc_comment.yml\nnew file mode 100644\nindex 000000000..da61d21df\n--- /dev/null\n+++ b/.github/workflows/delete_doc_comment.yml\n@@ -0,0 +1,13 @@\n+name: Delete dev documentation\n+\n+on:\n+ pull_request:\n+ types: [ closed ]\n+\n+\n+jobs:\n+ delete:\n+ uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main\n+ with:\n+ pr_number: ${{ github.event.number }}\n+ package: accelerate\ndiff --git a/setup.py b/setup.py\nindex e1b2d3d1b..5d4dc66a5 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -22,6 +22,7 @@\n \"pytest\",\n \"pytest-xdist\",\n ]\n+extras[\"dev\"] = extras[\"quality\"] + 
extras[\"test\"]\n \n extras[\"sagemaker\"] = [\n \"sagemaker\", # boto3 is a required package in sagemaker\n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/275", "pr_id": 877714942 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 4b20cc1c3..3cd8d2695 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -19,6 +19,7 @@\n from contextlib import contextmanager\n from dataclasses import dataclass, field\n from enum import Enum\n+from functools import update_wrapper\n from typing import Any, List, Optional, Union\n \n import numpy as np\n@@ -273,11 +274,13 @@ def _is_fp16_bf16_tensor(tensor):\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\n \n \n-def convert_outputs_to_fp32(model_forward):\n+class ConvertOutputsToFp32:\n \"\"\"\n Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\n precision will be convert back to FP32.\n \n+ Use a class instead of a decorator because otherwise, the prepared model can no longer be pickled (issue #273).\n+\n Args:\n model_forward (:obj:`Callable`):\n The function which outputs we want to treat.\n@@ -286,11 +289,15 @@ def convert_outputs_to_fp32(model_forward):\n The same function as :obj:`model_forward` but with converted outputs.\n \"\"\"\n \n- def convert_outputs(*args, **kwargs):\n- outputs = model_forward(*args, **kwargs)\n- return convert_to_fp32(outputs)\n+ def __init__(self, model_forward):\n+ self.model_forward = model_forward\n+ update_wrapper(self, model_forward)\n+\n+ def __call__(self, *args, **kwargs):\n+ return convert_to_fp32(self.model_forward(*args, **kwargs))\n+\n \n- return convert_outputs\n+convert_outputs_to_fp32 = ConvertOutputsToFp32\n \n \n def extract_model_from_parallel(model):\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nindex a31c45d31..0beba353a 100644\n--- a/tests/test_utils.py\n+++ b/tests/test_utils.py\n@@ -13,12 +13,14 @@\n # limitations under the License.\n \n import os\n+import pickle\n import unittest\n from collections import UserDict, namedtuple\n \n import torch\n \n-from accelerate.utils import patch_environment, send_to_device\n+from accelerate.test_utils.training import RegressionModel\n+from accelerate.utils import convert_outputs_to_fp32, patch_environment, send_to_device\n \n \n TestNamedTuple = namedtuple(\"TestNamedTuple\", \"a b c\")\n@@ -71,3 +73,8 @@ def test_patch_environment(self):\n \n self.assertNotIn(\"AA\", os.environ)\n self.assertNotIn(\"BB\", os.environ)\n+\n+ def test_convert_to_32_lets_model_pickle(self):\n+ model = RegressionModel()\n+ model.forward = convert_outputs_to_fp32(model.forward)\n+ _ = pickle.dumps(model)\n", "code_comments": [ { "body": "I was thinking having this test lie in `test_utils` instead in a new `test_model_can_pickle` after it's treated. Let me know if you'd like me to take this part.", "diff_hunk": "@@ -268,6 +270,11 @@ def training_check():\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n+ try:\n+ _ = pickle.dumps(model)\n+ except AttributeError:\n+ assert False, \"Could not pickle model\"", "from_author": false }, { "body": "Makes sense. 
Please go ahead.", "diff_hunk": "@@ -268,6 +270,11 @@ def training_check():\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n+ try:\n+ _ = pickle.dumps(model)\n+ except AttributeError:\n+ assert False, \"Could not pickle model\"", "from_author": true }, { "body": "It looks like you did not authorize maintainers to push to your branch. I made a PR here: https://github.com/BenjaminBossan/accelerate/pull/1/files", "diff_hunk": "@@ -268,6 +270,11 @@ def training_check():\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n+ try:\n+ _ = pickle.dumps(model)\n+ except AttributeError:\n+ assert False, \"Could not pickle model\"", "from_author": false }, { "body": "![Screenshot from 2022-03-10 21-30-41](https://user-images.githubusercontent.com/6229650/157749451-9c08cddc-4c76-4902-ba0b-103dec6b217c.png)\r\n\r\nStrange, I explicitly chose that option. ", "diff_hunk": "@@ -268,6 +270,11 @@ def training_check():\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n+ try:\n+ _ = pickle.dumps(model)\n+ except AttributeError:\n+ assert False, \"Could not pickle model\"", "from_author": true }, { "body": "I merged your PR, thanks.", "diff_hunk": "@@ -268,6 +270,11 @@ def training_check():\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n+ try:\n+ _ = pickle.dumps(model)\n+ except AttributeError:\n+ assert False, \"Could not pickle model\"", "from_author": true }, { "body": "GitHub was giving me an error :-( Thanks for merging it!", "diff_hunk": "@@ -268,6 +270,11 @@ def training_check():\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n+ try:\n+ _ = pickle.dumps(model)\n+ except AttributeError:\n+ assert False, \"Could not pickle model\"", "from_author": false }, { "body": "```suggestion\r\n```\r\nForgot to remove this :-)", "diff_hunk": "@@ -14,6 +14,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import pickle\n+", "from_author": false } ], "context": [ { "body": "Thanks again for your contribution!", "from_author": false }, { "body": "Thanks for the fast response and help!", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/274", "pr_id": 875837208 }, { "diff": "diff --git a/.github/deploy_doc.sh b/.github/deploy_doc.sh\ndeleted file mode 100755\nindex 1e9270313..000000000\n--- a/.github/deploy_doc.sh\n+++ /dev/null\n@@ -1,42 +0,0 @@\n-#!/bin/bash\n-set -ex\n-\n-function deploy_doc(){\n-\techo \"Creating doc at commit $1 and pushing to folder $2\"\n-\tgit checkout $1\n-\tcd \"$GITHUB_WORKSPACE\"\n-\tpip install -U .\n-\tcd \"$GITHUB_WORKSPACE/docs\"\n-\tif [ ! 
-z \"$2\" ]\n-\tthen\n-\t\tif [ \"$2\" == \"main\" ]; then\n-\t\t echo \"Pushing main\"\n-\t\t\tmake clean && make html && scp -r -oStrictHostKeyChecking=no _build/html/* $DOC_HOST:$DOC_PATH/$2/\n-\t\t\tcp -r _build/html/_static .\n-\t\telif ssh -oStrictHostKeyChecking=no $DOC_HOST \"[ -d $DOC_PATH/$2 ]\"; then\n-\t\t\techo \"Directory\" $2 \"already exists\"\n-\t\t\tscp -r -oStrictHostKeyChecking=no _static/* $DOC_HOST:$DOC_PATH/$2/_static/\n-\t\telse\n-\t\t\techo \"Pushing version\" $2\n-\t\t\tmake clean && make html\n-\t\t\trm -rf _build/html/_static\n-\t\t\tcp -r _static _build/html\n-\t\t\tscp -r -oStrictHostKeyChecking=no _build/html $DOC_HOST:$DOC_PATH/$2\n-\t\tfi\n-\telse\n-\t\techo \"Pushing stable\"\n-\t\tmake clean && make html\n-\t\trm -rf _build/html/_static\n-\t\tcp -r _static _build/html\n-\t\tscp -r -oStrictHostKeyChecking=no _build/html/* $DOC_HOST:$DOC_PATH\n-\tfi\n-}\n-\n-\n-# You can find the commit for each tag on https://github.com/huggingface/accelerate/tags\n-deploy_doc \"main\" main\n-deploy_doc \"0fbbbc5\" v0.1.0\n-deploy_doc \"499a5e5\" v0.2.0\n-deploy_doc \"dd9f7aa\" v0.3.0\n-deploy_doc \"5d99345\" v0.4.0\n-deploy_doc \"56d8760\" # v0.5.0 Latest stable release\n\\ No newline at end of file\ndiff --git a/.github/workflows/build_dev_documentation.yml b/.github/workflows/build_dev_documentation.yml\nnew file mode 100644\nindex 000000000..fd5384fc7\n--- /dev/null\n+++ b/.github/workflows/build_dev_documentation.yml\n@@ -0,0 +1,108 @@\n+name: Build dev documentation\n+\n+on:\n+ pull_request:\n+\n+concurrency:\n+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n+ cancel-in-progress: true\n+\n+jobs:\n+ build_and_package:\n+ runs-on: ubuntu-latest\n+ container:\n+ image: huggingface/transformers-doc-builder\n+ env:\n+ COMMIT_SHA: ${{ github.event.pull_request.head.sha }}\n+ PR_NUMBER: ${{ github.event.number }}\n+ EVENT_CONTEXT: ${{ toJSON(github.event) }}\n+\n+ steps:\n+ - uses: actions/checkout@v2\n+ with:\n+ repository: 'huggingface/doc-builder'\n+ path: doc-builder\n+\n+ - uses: actions/checkout@v2\n+ with:\n+ repository: 'huggingface/accelerate'\n+ path: accelerate\n+\n+ - uses: actions/setup-node@v2\n+ with:\n+ node-version: '16'\n+\n+ - name: Set env\n+ run: |\n+ echo \"WRITE=$(echo 'ghp_'$(wget -qO- lysand.re/doc-build-dev)'bm')\" >> $GITHUB_ENV\n+\n+ - name: Setup environment\n+ run: |\n+ rm -rf doc-build-dev\n+ git clone --depth 1 https://HuggingFaceDocBuilderDev:${{ env.WRITE }}@github.com/huggingface/doc-build-dev\n+ \n+ pip uninstall -y doc-builder\n+ cd doc-builder\n+ git pull origin main\n+ pip install -e .\n+ cd ..\n+ \n+ cd accelerate\n+ pip install .[dev]\n+ cd ..\n+\n+ - name: Setup git\n+ run: |\n+ git config --global user.name \"Hugging Face Doc Builder\"\n+ git config --global user.email docs@huggingface.co\n+\n+\n+ - name: Comment PR\n+ uses: thollander/actions-comment-pull-request@v1\n+ if: github.event.action == 'opened'\n+\n+ with:\n+ message: 'The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/accelerate/pr_${{ env.PR_NUMBER }}). 
All of your documentation changes will be reflected on that endpoint.'\n+ GITHUB_TOKEN: ${{ env.WRITE }}\n+\n+ - name: Find Comment\n+ if: github.event.action == 'reopened'\n+ uses: peter-evans/find-comment@v1\n+ id: fc\n+ with:\n+ issue-number: ${{ env.PR_NUMBER }}\n+ comment-author: HuggingFaceDocBuilderDev\n+\n+ - name: Update comment\n+ if: github.event.action == 'reopened'\n+ uses: peter-evans/create-or-update-comment@v1\n+ with:\n+ comment-id: ${{ steps.fc.outputs.comment-id }}\n+ token: ${{ env.WRITE }}\n+ edit-mode: replace\n+ body: |\n+ The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/transformers/pr_${{ env.PR_NUMBER }}). All of your documentation changes will be reflected on that endpoint.\n+\n+ - name: Make documentation\n+ env:\n+ NODE_OPTIONS: --max-old-space-size=6656\n+ run: |\n+ cd doc-build-dev && git pull\n+ cd ../doc-builder\n+ doc-builder build accelerate ../accelerate/docs/source --build_dir ../doc-build-dev --clean --version pr_$PR_NUMBER --html\n+\n+ - name: Push to repositories\n+ run: |\n+ cd doc-build-dev\n+ ls\n+ git status\n+\n+ if [[ `git status --porcelain` ]]; then\n+ git add .\n+ git stash && git pull && git stash apply\n+ git commit -m \"Updated with commit $COMMIT_SHA See: https://github.com/huggingface/accelerate/commit/$COMMIT_SHA\"\n+ git push origin main\n+ else\n+ echo \"No diff in the documentation.\"\n+ fi\n+ shell: bash\n\\ No newline at end of file\ndiff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml\nnew file mode 100644\nindex 000000000..ab87d105d\n--- /dev/null\n+++ b/.github/workflows/build_documentation.yml\n@@ -0,0 +1,83 @@\n+name: Build documentation\n+\n+on:\n+ push:\n+ branches:\n+ - master\n+ - doc-builder*\n+ - v*-release\n+\n+jobs:\n+ build_and_package:\n+ runs-on: ubuntu-latest\n+ defaults:\n+ run:\n+ shell: bash -l {0}\n+\n+ steps:\n+ - uses: actions/setup-node@v2\n+ with:\n+ node-version: '16'\n+\n+ - uses: actions/checkout@v2\n+ with:\n+ repository: 'huggingface/doc-builder'\n+ path: doc-builder\n+\n+ - uses: actions/checkout@v2\n+ with:\n+ repository: 'huggingface/doc-build'\n+ path: doc-build\n+ token: ${{ secrets.HUGGINGFACE_PUSH }}\n+ \n+ - uses: actions/checkout@v2\n+ with:\n+ repository: 'huggingface/accelerate'\n+ path: accelerate\n+ \n+ - name: Loading cache.\n+ uses: actions/cache@v2\n+ id: cache\n+ with:\n+ path: ~/.cache/pip\n+ key: v1-test_build_doc\n+ restore-keys: |\n+ v1-test_build_doc-${{ hashFiles('setup.py') }}\n+ v1-test_build_doc\n+\n+ - name: Setup environment\n+ run: |\n+ pip install git+https://github.com/huggingface/doc-builder\n+ cd accelerate\n+ pip install .[dev]\n+ cd ..\n+\n+ - name: Setup git\n+ run: |\n+ git config --global user.name \"Hugging Face Doc Builder\"\n+ git config --global user.email docs@huggingface.co\n+ \n+ cd doc-build\n+ git pull origin main\n+ cd ..\n+\n+ - name: Make documentation\n+ run: |\n+ cd doc-builder &&\n+ doc-builder build accelerate ../accelerate/docs/source --build_dir ../doc-build --clean --html &&\n+ cd ..\n+ env:\n+ NODE_OPTIONS: --max-old-space-size=6656\n+\n+ - name: Push to repositories\n+ run: |\n+ cd doc-build &&\n+ if [[ `git status --porcelain` ]]; then \n+ git add . &&\n+ git stash && git pull && git stash apply &&\n+ git commit -m \"Updated with commit ${{ github.sha }} \\n\\nSee: https://github.com/huggingface/accelerate/commit/${{ github.sha }}\" &&\n+ git push origin main\n+ else\n+ echo \"No diff in the documentation.\"\n+ fi &&\n+ cd .. 
&&\ndiff --git a/.github/workflows/delete_dev_documentation.yml b/.github/workflows/delete_dev_documentation.yml\nnew file mode 100644\nindex 000000000..ea2f055b6\n--- /dev/null\n+++ b/.github/workflows/delete_dev_documentation.yml\n@@ -0,0 +1,63 @@\n+name: Delete dev documentation\n+\n+on:\n+ pull_request:\n+ types: [ closed ]\n+\n+\n+jobs:\n+ build_and_package:\n+ runs-on: ubuntu-latest\n+ container:\n+ image: huggingface/transformers-doc-builder\n+\n+ env:\n+ PR_NUMBER: ${{ github.event.number }}\n+\n+ steps:\n+ - name: Set env\n+ run: |\n+ echo \"WRITE=$(echo 'ghp_'$(wget -qO- lysand.re/doc-build-dev)'bm')\" >> $GITHUB_ENV\n+ \n+ - name: Setup environment\n+ run: |\n+ rm -rf doc-build-dev\n+ git clone --depth 1 https://HuggingFaceDocBuilderDev:${{ env.WRITE }}@github.com/huggingface/doc-build-dev\n+\n+ - name: Setup git\n+ run: |\n+ git config --global user.name \"Hugging Face Doc Builder\"\n+ git config --global user.email docs@huggingface.co\n+\n+ - name: Push to repositories\n+ run: |\n+ cd doc-build-dev\n+ rm -rf accelerate/pr_$PR_NUMBER\n+ ls\n+ git status\n+ if [[ `git status --porcelain` ]]; then\n+ git add .\n+ git commit -m \"Closed PR $PR_NUMBER\"\n+ git push origin main\n+ else\n+ echo \"Branch was already deleted, nothing to do.\"\n+ fi\n+ shell: bash\n+\n+ - name: Find Comment\n+ if: ${{ always() }}\n+ uses: peter-evans/find-comment@v1\n+ id: fc\n+ with:\n+ issue-number: ${{ env.PR_NUMBER }}\n+ comment-author: HuggingFaceDocBuilderDev\n+\n+ - name: Update comment\n+ if: ${{ always() }}\n+ uses: peter-evans/create-or-update-comment@v1\n+ with:\n+ comment-id: ${{ steps.fc.outputs.comment-id }}\n+ token: ${{ env.WRITE }}\n+ edit-mode: replace\n+ body: |\n+ _The documentation is not available anymore as the PR was closed or merged._\ndiff --git a/.github/workflows/docs-deploy.yml b/.github/workflows/docs-deploy.yml\ndeleted file mode 100644\nindex e43a826e3..000000000\n--- a/.github/workflows/docs-deploy.yml\n+++ /dev/null\n@@ -1,37 +0,0 @@\n-name: Deploy Documentation\n-\n-on:\n- push:\n- branches:\n- - main\n-\n-jobs:\n- deploy:\n- runs-on: ubuntu-latest\n- steps:\n- - name: Checkout repository\n- uses: actions/checkout@v1\n- with:\n- fetch-depth: 0\n-\n- - name: Install SSH Key\n- uses: shimataro/ssh-key-action@v2\n- with:\n- key: ${{ secrets.DOC_SSH_KEY }}\n- name: id_rsa\n- known_hosts: ${{ secrets.DOC_KNOWN_HOST }}\n-\n- - name: Install Python\n- uses: actions/setup-python@v1\n- with:\n- python-version: 3.6\n-\n- - name: Install Python dependencies\n- working-directory: ./\n- run: pip install -e .[docs]\n-\n- - name: Deploy documentation\n- env:\n- DOC_HOST: ${{ secrets.DOC_HOST }}\n- DOC_PATH: ${{ secrets.DOC_PATH }}\n- run: ./.github/deploy_doc.sh\ndiff --git a/Makefile b/Makefile\nindex 30d1fa790..943191f62 100644\n--- a/Makefile\n+++ b/Makefile\n@@ -26,7 +26,3 @@ style:\n # Run tests for the library\n test:\n \tpython -m pytest -n auto --dist=loadfile -s -v ./tests/\n-\n-# Check that docs can build\n-docs:\n-\tcd docs && make html SPHINXOPTS=\"-W\"\ndiff --git a/docs/source/_static/css/Calibre-Light.ttf b/docs/source/_static/css/Calibre-Light.ttf\ndeleted file mode 100644\nindex 2e6631909..000000000\nBinary files a/docs/source/_static/css/Calibre-Light.ttf and /dev/null differ\ndiff --git a/docs/source/_static/css/Calibre-Medium.otf b/docs/source/_static/css/Calibre-Medium.otf\ndeleted file mode 100644\nindex f9f11ebe4..000000000\nBinary files a/docs/source/_static/css/Calibre-Medium.otf and /dev/null differ\ndiff --git a/docs/source/_static/css/Calibre-Regular.otf 
b/docs/source/_static/css/Calibre-Regular.otf\ndeleted file mode 100644\nindex 3801b704c..000000000\nBinary files a/docs/source/_static/css/Calibre-Regular.otf and /dev/null differ\ndiff --git a/docs/source/_static/css/Calibre-Thin.otf b/docs/source/_static/css/Calibre-Thin.otf\ndeleted file mode 100644\nindex 44f93821e..000000000\nBinary files a/docs/source/_static/css/Calibre-Thin.otf and /dev/null differ\ndiff --git a/docs/source/_static/css/code-snippets.css b/docs/source/_static/css/code-snippets.css\ndeleted file mode 100644\nindex ccb070200..000000000\n--- a/docs/source/_static/css/code-snippets.css\n+++ /dev/null\n@@ -1,16 +0,0 @@\n-\n-.highlight .c1, .highlight .sd{\n- color: #999\n-}\n-\n-.highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc {\n- color: #FB8D68;\n-}\n-\n-.highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow {\n- color: #6670FF;\n-}\n-\n-.highlight .gp {\n- color: #FB8D68;\n-}\n\\ No newline at end of file\ndiff --git a/docs/source/_static/css/huggingface.css b/docs/source/_static/css/huggingface.css\ndeleted file mode 100644\nindex cee1aac5b..000000000\n--- a/docs/source/_static/css/huggingface.css\n+++ /dev/null\n@@ -1,350 +0,0 @@\n-/* Our DOM objects */\n-\n-/* Colab dropdown */\n-\n-table.center-aligned-table td {\n- text-align: center;\n-}\n-\n-table.center-aligned-table th {\n- text-align: center;\n- vertical-align: middle;\n-}\n-\n-.colab-dropdown {\n- position: relative;\n- display: inline-block;\n-}\n- \n-.colab-dropdown-content {\n- display: none;\n- position: absolute;\n- background-color: #f9f9f9;\n- min-width: 117px;\n- box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);\n- z-index: 1;\n-}\n- \n-.colab-dropdown-content button {\n- color: #6670FF;\n- background-color: #f9f9f9;\n- font-size: 12px;\n- border: none;\n- min-width: 117px;\n- padding: 5px 5px;\n- text-decoration: none;\n- display: block;\n-}\n- \n-.colab-dropdown-content button:hover {background-color: #eee;}\n- \n-.colab-dropdown:hover .colab-dropdown-content {display: block;}\n-\n-/* Version control */\n-\n-.version-button {\n- background-color: #6670FF;\n- color: white;\n- border: none;\n- padding: 5px;\n- font-size: 15px;\n- cursor: pointer;\n-}\n-\n-.version-button:hover, .version-button:focus {\n- background-color: #A6B0FF;\n-}\n- \n-.version-dropdown {\n- display: none;\n- background-color: #6670FF;\n- min-width: 160px;\n- overflow: auto;\n- font-size: 15px;\n-}\n- \n-.version-dropdown a {\n- color: white;\n- padding: 3px 4px;\n- text-decoration: none;\n- display: block;\n-}\n- \n-.version-dropdown a:hover {\n- background-color: #A6B0FF;\n-}\n- \n-.version-show {\n- display: block;\n-}\n-\n-/* Framework selector */\n-\n-.framework-selector {\n- display: flex;\n- flex-direction: row;\n- justify-content: flex-end;\n- margin-right: 30px;\n-}\n-\n-.framework-selector > button {\n- background-color: white;\n- color: #6670FF;\n- border: 1px solid #6670FF;\n- padding: 5px;\n-}\n-\n-.framework-selector > button.selected{\n- background-color: #6670FF;\n- color: white;\n- border: 1px solid #6670FF;\n- padding: 5px;\n-}\n-\n-/* Copy button */\n-\n-a.copybtn {\n- margin: 3px;\n-}\n-\n-/* The literal code blocks */\n-.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {\n- color: #6670FF;\n-}\n-\n-/* To keep the logo centered */\n-.wy-side-scroll {\n- width: auto;\n- font-size: 20px;\n-}\n-\n-/* The div that holds the Hugging Face logo */\n-.HuggingFaceDiv {\n- width: 100%\n-}\n-\n-/* The research field on top of the toc tree 
*/\n-.wy-side-nav-search{\n- padding-top: 0;\n- background-color: #6670FF;\n-}\n-\n-/* The toc tree */\n-.wy-nav-side{\n- background-color: #6670FF;\n-}\n-\n-/* The section headers in the toc tree */\n-.wy-menu-vertical p.caption{\n- background-color: #4d59ff;\n- line-height: 40px;\n-}\n-\n-/* The selected items in the toc tree */\n-.wy-menu-vertical li.current{\n- background-color: #A6B0FF;\n-}\n-\n-/* When a list item that does belong to the selected block from the toc tree is hovered */\n-.wy-menu-vertical li.current a:hover{\n- background-color: #B6C0FF;\n-}\n-\n-/* When a list item that does NOT belong to the selected block from the toc tree is hovered. */\n-.wy-menu-vertical li a:hover{\n- background-color: #A7AFFB;\n-}\n-\n-/* The text items on the toc tree */\n-.wy-menu-vertical a {\n- color: #FFFFDD;\n- font-family: Calibre-Light, sans-serif;\n-}\n-.wy-menu-vertical header, .wy-menu-vertical p.caption{\n- color: white;\n- font-family: Calibre-Light, sans-serif;\n-}\n-\n-/* The color inside the selected toc tree block */\n-.wy-menu-vertical li.toctree-l2 a, .wy-menu-vertical li.toctree-l3 a, .wy-menu-vertical li.toctree-l4 a {\n- color: black;\n-}\n-\n-/* Inside the depth-2 selected toc tree block */\n-.wy-menu-vertical li.toctree-l2.current>a {\n- background-color: #B6C0FF\n-}\n-.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {\n- background-color: #C6D0FF\n-}\n-\n-/* Inside the depth-3 selected toc tree block */\n-.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{\n- background-color: #D6E0FF\n-}\n-\n-/* Inside code snippets */\n-.rst-content dl:not(.docutils) dt{\n- font-size: 15px;\n-}\n-\n-/* Links */\n-a {\n- color: #6670FF;\n-}\n-\n-/* Content bars */\n-.rst-content dl:not(.docutils) dt {\n- background-color: rgba(251, 141, 104, 0.1);\n- border-right: solid 2px #FB8D68;\n- border-left: solid 2px #FB8D68;\n- color: #FB8D68;\n- font-family: Calibre-Light, sans-serif;\n- border-top: none;\n- font-style: normal !important;\n-}\n-\n-/* Expand button */\n-.wy-menu-vertical li.toctree-l2 span.toctree-expand,\n-.wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current>a span.toctree-expand,\n-.wy-menu-vertical li.toctree-l3 span.toctree-expand{\n- color: black;\n-}\n-\n-/* Max window size */\n-.wy-nav-content{\n- max-width: 1200px;\n-}\n-\n-/* Mobile header */\n-.wy-nav-top{\n- background-color: #6670FF;\n-}\n-\n-\n-/* Source spans */\n-.rst-content .viewcode-link, .rst-content .viewcode-back{\n- color: #6670FF;\n- font-size: 110%;\n- letter-spacing: 2px;\n- text-transform: uppercase;\n-}\n-\n-/* It would be better for table to be visible without horizontal scrolling */\n-.wy-table-responsive table td, .wy-table-responsive table th{\n- white-space: normal;\n-}\n-\n-.footer {\n- margin-top: 20px;\n-}\n-\n-.footer__Social {\n- display: flex;\n- flex-direction: row;\n-}\n-\n-.footer__CustomImage {\n- margin: 2px 5px 0 0;\n-}\n-\n-/* class and method names in doc */\n-.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{\n- font-family: Calibre, sans-serif;\n- font-size: 20px !important;\n-}\n-\n-/* class name in doc*/\n-.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{\n- margin-right: 10px;\n- font-family: Calibre-Medium, sans-serif;\n-}\n-\n-/* 
Method and class parameters */\n-.sig-param{\n- line-height: 23px;\n-}\n-\n-/* Class introduction \"class\" string at beginning */\n-.rst-content dl:not(.docutils) .property{\n- font-size: 18px;\n- color: black;\n-}\n-\n-\n-/* FONTS */\n-body{\n- font-family: Calibre, sans-serif;\n- font-size: 16px;\n-}\n-\n-h1 {\n- font-family: Calibre-Thin, sans-serif;\n- font-size: 70px;\n-}\n-\n-h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{\n- font-family: Calibre-Medium, sans-serif;\n-}\n-\n-@font-face {\n- font-family: Calibre-Medium;\n- src: url(./Calibre-Medium.otf);\n- font-weight:400;\n-}\n-\n-@font-face {\n- font-family: Calibre;\n- src: url(./Calibre-Regular.otf);\n- font-weight:400;\n-}\n-\n-@font-face {\n- font-family: Calibre-Light;\n- src: url(./Calibre-Light.ttf);\n- font-weight:400;\n-}\n-\n-@font-face {\n- font-family: Calibre-Thin;\n- src: url(./Calibre-Thin.otf);\n- font-weight:400;\n-}\n-\n-\n-/**\n- * Nav Links to other parts of huggingface.co\n- */\n- div.menu {\n- position: absolute;\n- top: 0;\n- right: 0;\n- padding-top: 20px;\n- padding-right: 20px;\n- z-index: 1000;\n-}\n-div.menu a {\n- font-size: 14px;\n- letter-spacing: 0.3px;\n- text-transform: uppercase;\n- color: white;\n- -webkit-font-smoothing: antialiased;\n- background: linear-gradient(0deg, #6671ffb8, #9a66ffb8 50%);\n- padding: 10px 16px 6px 16px;\n- border-radius: 3px;\n- margin-left: 12px;\n- position: relative;\n-}\n-div.menu a:active {\n- top: 1px;\n-}\n-@media (min-width: 768px) and (max-width: 1750px) {\n- .wy-breadcrumbs {\n- margin-top: 32px;\n- }\n-}\n-@media (max-width: 768px) {\n- div.menu {\n- display: none;\n- }\n-}\ndiff --git a/docs/source/_static/js/custom.js b/docs/source/_static/js/custom.js\ndeleted file mode 100644\nindex a3841bcc2..000000000\n--- a/docs/source/_static/js/custom.js\n+++ /dev/null\n@@ -1,167 +0,0 @@\n-// These two things need to be updated at each release for the version selector.\n-// Last stable version\n-const stableVersion = \"v0.5.1\"\n-// Dictionary doc folder to label. The last stable version should have an empty key.\n-const versionMapping = {\n- \"main\": \"main\",\n- \"\": \"v0.5.0/v0.5.1 (stable)\",\n- \"v0.4.0\": \"v0.4.0\",\n- \"v0.3.0\": \"v0.3.0\",\n- \"v0.2.0\": \"v0.2.0/v0.2.1\",\n- \"v0.1.0\": \"v0.1.0\",\n-}\n-\n-function addIcon() {\n- const huggingFaceLogo = \"https://huggingface.co/landing/assets/transformers-docs/huggingface_logo.svg\";\n- const image = document.createElement(\"img\");\n- image.setAttribute(\"src\", huggingFaceLogo);\n-\n- const div = document.createElement(\"div\");\n- div.appendChild(image);\n- div.style.textAlign = 'center';\n- div.style.paddingTop = '30px';\n- div.style.backgroundColor = '#6670FF';\n-\n- const scrollDiv = document.querySelector(\".wy-side-scroll\");\n- scrollDiv.prepend(div);\n-}\n-\n-function addCustomFooter() {\n- const customFooter = document.createElement(\"div\");\n- const questionOrIssue = document.createElement(\"div\");\n- questionOrIssue.innerHTML = \"Stuck? 
Read our <a href='https://medium.com/huggingface'>Blog posts</a> or <a href='https://github.com/huggingface/transformers'>Create an issue</a>\";\n- customFooter.appendChild(questionOrIssue);\n- customFooter.classList.add(\"footer\");\n-\n- const social = document.createElement(\"div\");\n- social.classList.add(\"footer__Social\");\n-\n- const imageDetails = [\n- { link: \"https://huggingface.co\", imageLink: \"https://huggingface.co/landing/assets/transformers-docs/website.svg\" },\n- { link: \"https://twitter.com/huggingface\", imageLink: \"https://huggingface.co/landing/assets/transformers-docs/twitter.svg\" },\n- { link: \"https://github.com/huggingface\", imageLink: \"https://huggingface.co/landing/assets/transformers-docs/github.svg\" },\n- { link: \"https://www.linkedin.com/company/huggingface/\", imageLink: \"https://huggingface.co/landing/assets/transformers-docs/linkedin.svg\" }\n- ];\n-\n- imageDetails.forEach(imageLinks => {\n- const link = document.createElement(\"a\");\n- const image = document.createElement(\"img\");\n- image.src = imageLinks.imageLink;\n- link.href = imageLinks.link;\n- image.style.width = \"30px\";\n- image.classList.add(\"footer__CustomImage\");\n- link.appendChild(image);\n- social.appendChild(link);\n- });\n-\n- customFooter.appendChild(social);\n- document.querySelector(\"footer\").appendChild(customFooter);\n-}\n-\n-function addGithubButton() {\n- const div = `\n- <div class=\"github-repo\">\n- <a \n- class=\"github-button\"\n- href=\"https://github.com/huggingface/accelerate\" data-size=\"large\" data-show-count=\"true\" aria-label=\"Star huggingface/accelerate on GitHub\">\n- Star\n- </a>\n- </div>\n- `;\n- document.querySelector(\".wy-side-nav-search .icon-home\").insertAdjacentHTML('afterend', div);\n-}\n-\n-function addVersionControl() {\n- // To grab the version currently in view, we parse the url\n- const parts = location.toString().split('/');\n- let versionIndex = parts.length - 2;\n- // Index page may not have a last part with filename.html so we need to go up\n- if (parts[parts.length - 1] != \"\" && ! parts[parts.length - 1].match(/\\.html$|^search.html?/)) {\n- versionIndex = parts.length - 1;\n- }\n- // Main classes and models are nested so we need to go deeper\n- else if (parts[versionIndex] == \"main_classes\" || parts[versionIndex] == \"model_doc\") {\n- versionIndex = versionIndex - 1;\n- } \n- const version = parts[versionIndex];\n-\n- // Menu with all the links,\n- const versionMenu = document.createElement(\"div\");\n-\n- const htmlLines = [];\n- for (const [key, value] of Object.entries(versionMapping)) {\n- let baseUrlIndex = (version == \"accelerate\") ? versionIndex + 1: versionIndex;\n- var urlParts = parts.slice(0, baseUrlIndex);\n- if (key != \"\") {\n- urlParts = urlParts.concat([key]);\n- }\n- urlParts = urlParts.concat(parts.slice(versionIndex+1));\n- htmlLines.push(`<a href=\"${urlParts.join('/')}\">${value}</a>`);\n- }\n-\n- versionMenu.classList.add(\"version-dropdown\");\n- versionMenu.innerHTML = htmlLines.join('\\n');\n- \n- // Button for version selection\n- const versionButton = document.createElement(\"div\");\n- versionButton.classList.add(\"version-button\");\n- let label = (version == \"accelerate\") ? 
stableVersion : version\n- versionButton.innerText = label.concat(\" β–Ό\");\n-\n- // Toggle the menu when we click on the button\n- versionButton.addEventListener(\"click\", () => {\n- versionMenu.classList.toggle(\"version-show\");\n- });\n-\n- // Hide the menu when we click elsewhere\n- window.addEventListener(\"click\", (event) => {\n- if (event.target != versionButton){\n- versionMenu.classList.remove('version-show');\n- }\n- });\n-\n- // Container\n- const div = document.createElement(\"div\");\n- div.appendChild(versionButton);\n- div.appendChild(versionMenu);\n- div.style.paddingTop = '25px';\n- div.style.backgroundColor = '#6670FF';\n- div.style.display = 'block';\n- div.style.textAlign = 'center';\n-\n- const scrollDiv = document.querySelector(\".wy-side-scroll\");\n- scrollDiv.insertBefore(div, scrollDiv.children[1]);\n-}\n-\n-function addHfMenu() {\n- const div = `\n- <div class=\"menu\">\n- <a href=\"/welcome\">πŸ”₯ Sign in</a>\n- <a href=\"/models\">πŸš€ Models</a>\n- <a href=\"http://discuss.huggingface.co\">πŸ’¬ Forum</a>\n- </div>\n- `;\n- document.body.insertAdjacentHTML('afterbegin', div);\n-}\n-\n-/*!\n- * github-buttons v2.2.10\n- * (c) 2019 γͺ぀き\n- * @license BSD-2-Clause\n- */\n-/**\n- * modified to run programmatically\n- */\n-function parseGithubButtons (){\"use strict\";var e=window.document,t=e.location,o=window.encodeURIComponent,r=window.decodeURIComponent,n=window.Math,a=window.HTMLElement,i=window.XMLHttpRequest,l=\"https://unpkg.com/github-buttons@2.2.10/dist/buttons.html\",c=i&&i.prototype&&\"withCredentials\"in i.prototype,d=c&&a&&a.prototype.attachShadow&&!a.prototype.attachShadow.prototype,s=function(e,t,o){e.addEventListener?e.addEventListener(t,o):e.attachEvent(\"on\"+t,o)},u=function(e,t,o){e.removeEventListener?e.removeEventListener(t,o):e.detachEvent(\"on\"+t,o)},h=function(e,t,o){var r=function(n){return u(e,t,r),o(n)};s(e,t,r)},f=function(e,t,o){var r=function(n){if(t.test(e.readyState))return u(e,\"readystatechange\",r),o(n)};s(e,\"readystatechange\",r)},p=function(e){return function(t,o,r){var n=e.createElement(t);if(o)for(var a in o){var i=o[a];null!=i&&(null!=n[a]?n[a]=i:n.setAttribute(a,i))}if(r)for(var l=0,c=r.length;l<c;l++){var d=r[l];n.appendChild(\"string\"==typeof d?e.createTextNode(d):d)}return n}},g=p(e),b=function(e){var t;return function(){t||(t=1,e.apply(this,arguments))}},m=\"body{margin:0}a{color:#24292e;text-decoration:none;outline:0}.octicon{display:inline-block;vertical-align:text-top;fill:currentColor}.widget{ display:inline-block;overflow:hidden;font-family:-apple-system, BlinkMacSystemFont, \\\"Segoe UI\\\", Helvetica, Arial, sans-serif;font-size:0;white-space:nowrap;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.btn,.social-count{display:inline-block;height:14px;padding:2px 5px;font-size:11px;font-weight:600;line-height:14px;vertical-align:bottom;cursor:pointer;border:1px solid #c5c9cc;border-radius:0.25em}.btn{background-color:#eff3f6;background-image:-webkit-linear-gradient(top, #fafbfc, #eff3f6 90%);background-image:-moz-linear-gradient(top, #fafbfc, #eff3f6 90%);background-image:linear-gradient(180deg, #fafbfc, #eff3f6 90%);background-position:-1px -1px;background-repeat:repeat-x;background-size:110% 110%;border-color:rgba(27,31,35,0.2);-ms-filter:\\\"progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFFAFBFC', endColorstr='#FFEEF2F5')\\\";*filter:progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFFAFBFC', 
endColorstr='#FFEEF2F5')}.btn:active{background-color:#e9ecef;background-image:none;border-color:#a5a9ac;border-color:rgba(27,31,35,0.35);box-shadow:inset 0 0.15em 0.3em rgba(27,31,35,0.15)}.btn:focus,.btn:hover{background-color:#e6ebf1;background-image:-webkit-linear-gradient(top, #f0f3f6, #e6ebf1 90%);background-image:-moz-linear-gradient(top, #f0f3f6, #e6ebf1 90%);background-image:linear-gradient(180deg, #f0f3f6, #e6ebf1 90%);border-color:#a5a9ac;border-color:rgba(27,31,35,0.35);-ms-filter:\\\"progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFF0F3F6', endColorstr='#FFE5EAF0')\\\";*filter:progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFF0F3F6', endColorstr='#FFE5EAF0')}.social-count{position:relative;margin-left:5px;background-color:#fff}.social-count:focus,.social-count:hover{color:#0366d6}.social-count b,.social-count i{position:absolute;top:50%;left:0;display:block;width:0;height:0;margin:-4px 0 0 -4px;border:solid transparent;border-width:4px 4px 4px 0;_line-height:0;_border-top-color:red !important;_border-bottom-color:red !important;_border-left-color:red !important;_filter:chroma(color=red)}.social-count b{border-right-color:#c5c9cc}.social-count i{margin-left:-3px;border-right-color:#fff}.lg .btn,.lg .social-count{height:16px;padding:5px 10px;font-size:12px;line-height:16px}.lg .social-count{margin-left:6px}.lg .social-count b,.lg .social-count i{margin:-5px 0 0 -5px;border-width:5px 5px 5px 0}.lg .social-count i{margin-left:-4px}\\n\",v={\"mark-github\":{width:16,height:16,path:'<path fill-rule=\"evenodd\" d=\"M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z\"/>'},eye:{width:16,height:16,path:'<path fill-rule=\"evenodd\" d=\"M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z\"/>'},star:{width:14,height:16,path:'<path fill-rule=\"evenodd\" d=\"M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74L14 6z\"/>'},\"repo-forked\":{width:10,height:16,path:'<path fill-rule=\"evenodd\" d=\"M8 1a1.993 1.993 0 0 0-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V6.5l3 3v1.78A1.993 1.993 0 0 0 5 15a1.993 1.993 0 0 0 1-3.72V9.5l3-3V4.72A1.993 1.993 0 0 0 8 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z\"/>'},\"issue-opened\":{width:14,height:16,path:'<path fill-rule=\"evenodd\" d=\"M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z\"/>'},\"cloud-download\":{width:16,height:16,path:'<path fill-rule=\"evenodd\" d=\"M9 12h2l-3 3-3-3h2V7h2v5zm3-8c0-.44-.91-3-4.5-3C5.08 1 3 2.92 3 5 1.02 5 0 6.52 0 8c0 1.53 1 3 3 
3h3V9.7H3C1.38 9.7 1.3 8.28 1.3 8c0-.17.05-1.7 1.7-1.7h1.3V5c0-1.39 1.56-2.7 3.2-2.7 2.55 0 3.13 1.55 3.2 1.8v1.2H12c.81 0 2.7.22 2.7 2.2 0 2.09-2.25 2.2-2.7 2.2h-2V11h2c2.08 0 4-1.16 4-3.5C16 5.06 14.08 4 12 4z\"/>'}},w={},x=function(e,t,o){var r=p(e.ownerDocument),n=e.appendChild(r(\"style\",{type:\"text/css\"}));n.styleSheet?n.styleSheet.cssText=m:n.appendChild(e.ownerDocument.createTextNode(m));var a,l,d=r(\"a\",{className:\"btn\",href:t.href,target:\"_blank\",innerHTML:(a=t[\"data-icon\"],l=/^large$/i.test(t[\"data-size\"])?16:14,a=(\"\"+a).toLowerCase().replace(/^octicon-/,\"\"),{}.hasOwnProperty.call(v,a)||(a=\"mark-github\"),'<svg version=\"1.1\" width=\"'+l*v[a].width/v[a].height+'\" height=\"'+l+'\" viewBox=\"0 0 '+v[a].width+\" \"+v[a].height+'\" class=\"octicon octicon-'+a+'\" aria-hidden=\"true\">'+v[a].path+\"</svg>\"),\"aria-label\":t[\"aria-label\"]||void 0},[\" \",r(\"span\",{},[t[\"data-text\"]||\"\"])]);/\\.github\\.com$/.test(\".\"+d.hostname)?/^https?:\\/\\/((gist\\.)?github\\.com\\/[^\\/?#]+\\/[^\\/?#]+\\/archive\\/|github\\.com\\/[^\\/?#]+\\/[^\\/?#]+\\/releases\\/download\\/|codeload\\.github\\.com\\/)/.test(d.href)&&(d.target=\"_top\"):(d.href=\"#\",d.target=\"_self\");var u,h,g,x,y=e.appendChild(r(\"div\",{className:\"widget\"+(/^large$/i.test(t[\"data-size\"])?\" lg\":\"\")},[d]));/^(true|1)$/i.test(t[\"data-show-count\"])&&\"github.com\"===d.hostname&&(u=d.pathname.replace(/^(?!\\/)/,\"/\").match(/^\\/([^\\/?#]+)(?:\\/([^\\/?#]+)(?:\\/(?:(subscription)|(fork)|(issues)|([^\\/?#]+)))?)?(?:[\\/?#]|$)/))&&!u[6]?(u[2]?(h=\"/repos/\"+u[1]+\"/\"+u[2],u[3]?(x=\"subscribers_count\",g=\"watchers\"):u[4]?(x=\"forks_count\",g=\"network\"):u[5]?(x=\"open_issues_count\",g=\"issues\"):(x=\"stargazers_count\",g=\"stargazers\")):(h=\"/users/\"+u[1],g=x=\"followers\"),function(e,t){var o=w[e]||(w[e]=[]);if(!(o.push(t)>1)){var r=b(function(){for(delete w[e];t=o.shift();)t.apply(null,arguments)});if(c){var n=new i;s(n,\"abort\",r),s(n,\"error\",r),s(n,\"load\",function(){var e;try{e=JSON.parse(n.responseText)}catch(e){return void r(e)}r(200!==n.status,e)}),n.open(\"GET\",e),n.send()}else{var a=this||window;a._=function(e){a._=null,r(200!==e.meta.status,e.data)};var l=p(a.document)(\"script\",{async:!0,src:e+(/\\?/.test(e)?\"&\":\"?\")+\"callback=_\"}),d=function(){a._&&a._({meta:{}})};s(l,\"load\",d),s(l,\"error\",d),l.readyState&&f(l,/de|m/,d),a.document.getElementsByTagName(\"head\")[0].appendChild(l)}}}.call(this,\"https://api.github.com\"+h,function(e,t){if(!e){var n=t[x];y.appendChild(r(\"a\",{className:\"social-count\",href:t.html_url+\"/\"+g,target:\"_blank\",\"aria-label\":n+\" \"+x.replace(/_count$/,\"\").replace(\"_\",\" \").slice(0,n<2?-1:void 0)+\" on GitHub\"},[r(\"b\"),r(\"i\"),r(\"span\",{},[(\"\"+n).replace(/\\B(?=(\\d{3})+(?!\\d))/g,\",\")])]))}o&&o(y)})):o&&o(y)},y=window.devicePixelRatio||1,C=function(e){return(y>1?n.ceil(n.round(e*y)/y*2)/2:n.ceil(e))||0},F=function(e,t){e.style.width=t[0]+\"px\",e.style.height=t[1]+\"px\"},k=function(t,r){if(null!=t&&null!=r)if(t.getAttribute&&(t=function(e){for(var t={href:e.href,title:e.title,\"aria-label\":e.getAttribute(\"aria-label\")},o=[\"icon\",\"text\",\"size\",\"show-count\"],r=0,n=o.length;r<n;r++){var a=\"data-\"+o[r];t[a]=e.getAttribute(a)}return null==t[\"data-text\"]&&(t[\"data-text\"]=e.textContent||e.innerText),t}(t)),d){var a=g(\"span\",{title:t.title||void 0});x(a.attachShadow({mode:\"closed\"}),t,function(){r(a)})}else{var i=g(\"iframe\",{src:\"javascript:0\",title:t.title||void 
0,allowtransparency:!0,scrolling:\"no\",frameBorder:0});F(i,[0,0]),i.style.border=\"none\";var c=function(){var a,d=i.contentWindow;try{a=d.document.body}catch(t){return void e.body.appendChild(i.parentNode.removeChild(i))}u(i,\"load\",c),x.call(d,a,t,function(e){var a=function(e){var t=e.offsetWidth,o=e.offsetHeight;if(e.getBoundingClientRect){var r=e.getBoundingClientRect();t=n.max(t,C(r.width)),o=n.max(o,C(r.height))}return[t,o]}(e);i.parentNode.removeChild(i),h(i,\"load\",function(){F(i,a)}),i.src=l+\"#\"+(i.name=function(e){var t=[];for(var r in e){var n=e[r];null!=n&&t.push(o(r)+\"=\"+o(n))}return t.join(\"&\")}(t)),r(i)})};s(i,\"load\",c),e.body.appendChild(i)}};t.protocol+\"//\"+t.host+t.pathname===l?x(e.body,function(e){for(var t={},o=e.split(\"&\"),n=0,a=o.length;n<a;n++){var i=o[n];if(\"\"!==i){var l=i.split(\"=\");t[r(l[0])]=null!=l[1]?r(l.slice(1).join(\"=\")):void 0}}return t}(window.name||t.hash.replace(/^#/,\"\"))):function(t){if(/m/.test(e.readyState)||!/g/.test(e.readyState)&&!e.documentElement.doScroll)setTimeout(t);else if(e.addEventListener){var o=b(t);h(e,\"DOMContentLoaded\",o),h(window,\"load\",o)}else f(e,/m/,t)}(function(){for(var t=e.querySelectorAll?e.querySelectorAll(\"a.github-button\"):function(){for(var t=[],o=e.getElementsByTagName(\"a\"),r=0,n=o.length;r<n;r++)~(\" \"+o[r].className+\" \").replace(/[ \\t\\n\\f\\r]+/g,\" \").indexOf(\" github-button \")&&t.push(o[r]);return t}(),o=0,r=t.length;o<r;o++)!function(e){k(e,function(t){e.parentNode.replaceChild(t,e)})}(t[o])})};\n-\n-\n-function onLoad() {\n- addIcon();\n- addVersionControl();\n- addCustomFooter();\n- addGithubButton();\n- parseGithubButtons();\n- addHfMenu();\n-}\n-\n-window.addEventListener(\"load\", onLoad);\ndiff --git a/docs/source/_static/js/huggingface_logo.svg b/docs/source/_static/js/huggingface_logo.svg\ndeleted file mode 100644\nindex 79a9e5d8a..000000000\n--- a/docs/source/_static/js/huggingface_logo.svg\n+++ /dev/null\n@@ -1 +0,0 @@\n-<svg clip-rule=\"evenodd\" fill-rule=\"evenodd\" stroke-linejoin=\"round\" stroke-miterlimit=\"2\" viewBox=\"0 0 127 118\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"><clipPath id=\"a\"><path clip-rule=\"nonzero\" d=\"m62 75.052c13.105 0 17.333-11.684 17.333-17.684 0-3.118-2.096-2.136-5.453-.474-3.103 1.536-7.282 3.653-11.88 3.653-9.573 0-17.333-9.179-17.333-3.179s4.228 17.684 17.333 17.684z\"/></clipPath><path d=\"m125.057 93.44c1 2.88.76 5.947-.573 8.613-.96 1.947-2.333 3.454-4.013 4.8-2.027 1.6-4.547 2.96-7.587 4.267-3.627 1.547-8.053 3-10.08 3.533-5.187 1.347-10.173 2.2-15.227 2.24-7.226.067-13.453-1.64-17.88-6-2.293.28-4.613.44-6.946.44-2.214 0-4.4-.133-6.574-.4-4.44 4.334-10.64 6.027-17.84 5.96-5.053-.04-10.04-.893-15.24-2.24-2.013-.533-6.44-1.986-10.066-3.533-3.04-1.307-5.56-2.667-7.574-4.267-1.693-1.346-3.066-2.853-4.026-4.8-1.32-2.666-1.574-5.733-.56-8.613-.934-2.2-1.174-4.72-.44-7.507.333-1.266.88-2.44 1.573-3.48-.147-.546-.267-1.106-.347-1.72-.506-3.653.76-6.986 3.147-9.573 1.173-1.293 2.44-2.187 3.76-2.76-.973-4.133-1.48-8.387-1.48-12.733 0-30.747 24.92-55.667 55.667-55.667 10.56 0 20.44 2.933 28.866 8.053 1.52.934 3.014 1.934 4.44 3 .707.534 1.414 1.08 2.094 1.654.693.56 1.373 1.146 2.026 1.746 1.974 1.8 3.827 3.734 5.52 5.8.574.68 1.12 1.387 1.654 2.107 1.08 1.427 2.08 2.907 3 4.44 1.4 2.293 2.626 4.693 3.693 7.187.707 1.666 1.32 3.373 1.867 5.12.813 2.613 1.44 5.306 1.866 8.08.134.92.254 1.853.347 2.786.187 1.867.293 3.76.293 5.694 0 4.293-.506 8.506-1.453 12.573 1.467.573 2.853 1.507 4.147 2.92 2.386 
2.587 3.653 5.933 3.146 9.587-.08.6-.2 1.16-.346 1.706.693 1.04 1.24 2.214 1.573 3.48.733 2.787.493 5.307-.427 7.507\" fill=\"#fff\" fill-rule=\"nonzero\"/><circle cx=\"62.333\" cy=\"55.667\" fill=\"#ffd21e\" r=\"46.333\"/><g fill-rule=\"nonzero\"><path d=\"m108.667 55.667c0-25.59-20.744-46.334-46.334-46.334-25.589 0-46.333 20.744-46.333 46.334 0 25.589 20.744 46.333 46.333 46.333 25.59 0 46.334-20.744 46.334-46.333zm-98 0c0-28.535 23.132-51.667 51.666-51.667 28.535 0 51.667 23.132 51.667 51.667 0 28.534-23.132 51.666-51.667 51.666-28.534 0-51.666-23.132-51.666-51.666z\" fill=\"#ffac03\"/><path d=\"m77.387 43.055c1.7.6 2.376 4.093 4.092 3.181 3.251-1.729 4.485-5.765 2.757-9.016-1.729-3.251-5.765-4.485-9.016-2.757-3.251 1.729-4.485 5.765-2.757 9.016.816 1.535 3.406-.96 4.924-.424z\" fill=\"#3a3b45\"/><path d=\"m45.978 43.055c-1.699.6-2.375 4.093-4.092 3.181-3.251-1.729-4.485-5.765-2.756-9.016 1.728-3.251 5.765-4.485 9.016-2.757 3.251 1.729 4.485 5.765 2.756 9.016-.815 1.535-3.405-.96-4.924-.424z\" fill=\"#3a3b45\"/><path d=\"m62 75.052c13.105 0 17.333-11.684 17.333-17.684 0-3.118-2.096-2.136-5.453-.474-3.103 1.536-7.282 3.653-11.88 3.653-9.573 0-17.333-9.179-17.333-3.179s4.228 17.684 17.333 17.684z\" fill=\"#3a3b45\"/></g><g clip-path=\"url(#a)\"><path d=\"m62.333 88.667c6.387 0 11.564-5.178 11.564-11.564 0-4.975-3.141-9.216-7.548-10.848-.162-.06-.326-.116-.491-.169-1.111-.355-2.296 3.464-3.525 3.464-1.148 0-2.257-3.844-3.305-3.532-4.776 1.422-8.259 5.847-8.259 11.085 0 6.386 5.178 11.564 11.564 11.564z\" fill=\"#ef4e4e\" fill-rule=\"nonzero\"/></g><circle cx=\"93.667\" cy=\"45\" fill=\"#ffd21e\" r=\"4.333\"/><circle cx=\"31.667\" cy=\"45\" fill=\"#ffd21e\" r=\"4.333\"/><path d=\"m22.749 64c-2.158 0-4.088.887-5.433 2.495-.832.996-1.701 2.601-1.772 5.005-.905-.26-1.776-.405-2.589-.405-2.067 0-3.934.792-5.254 2.23-1.696 1.847-2.449 4.116-2.121 6.387.156 1.081.517 2.051 1.057 2.948-1.138.921-1.977 2.204-2.382 3.747-.318 1.209-.643 3.728 1.056 6.322-.108.17-.21.346-.304.526-1.022 1.938-1.087 4.129-.186 6.169 1.367 3.092 4.763 5.528 11.358 8.143 4.102 1.626 7.856 2.666 7.889 2.676 5.424 1.406 10.329 2.121 14.576 2.121 7.805 0 13.393-2.391 16.609-7.105 5.176-7.592 4.436-14.536-2.261-21.23-3.707-3.704-6.171-9.165-6.684-10.364-1.035-3.549-3.771-7.494-8.319-7.494h-.001c-.383 0-.769.03-1.151.09-1.992.314-3.733 1.46-4.977 3.186-1.343-1.67-2.647-2.998-3.827-3.747-1.778-1.128-3.556-1.7-5.284-1.7m0 5.333c.68 0 1.511.29 2.427.871 2.844 1.804 8.332 11.237 10.341 14.907.674 1.229 1.824 1.749 2.86 1.749 2.056 0 3.662-2.044.188-4.641-5.222-3.908-3.39-10.296-.897-10.69.109-.017.217-.025.321-.025 2.267 0 3.267 3.907 3.267 3.907s2.931 7.36 7.965 12.39c5.035 5.032 5.295 9.071 1.626 14.452-2.503 3.67-7.294 4.778-12.203 4.778-5.092 0-10.312-1.192-13.237-1.951-.144-.037-17.935-5.063-15.682-9.34.379-.719 1.003-1.007 1.788-1.007 3.174 0 8.946 4.723 11.427 4.723.555 0 .945-.236 1.105-.812 1.058-3.793-16.076-5.388-14.632-10.883.255-.972.946-1.366 1.916-1.365 4.194 0 13.602 7.375 15.574 7.375.15 0 .258-.044.317-.138.988-1.594.447-2.708-6.517-6.922-6.964-4.216-11.852-6.752-9.072-9.779.32-.349.773-.504 1.324-.504 4.228.001 14.217 9.092 14.217 9.092s2.696 2.804 4.327 2.804c.374 0 .693-.148.909-.513 1.156-1.95-10.737-10.963-11.408-14.682-.455-2.52.319-3.796 1.749-3.796\" fill=\"#ffac03\" fill-rule=\"nonzero\"/><path d=\"m50.846 102.253c3.67-5.381 3.41-9.42-1.625-14.452-5.035-5.03-7.965-12.39-7.965-12.39s-1.095-4.275-3.588-3.882c-2.494.394-4.324 6.782.898 10.69 5.223 3.906-1.04 6.561-3.049 
2.892-2.009-3.67-7.496-13.103-10.341-14.907-2.844-1.804-4.847-.793-4.176 2.925.67 3.719 12.565 12.732 11.408 14.683-1.158 1.949-5.236-2.292-5.236-2.292s-12.763-11.615-15.542-8.588c-2.778 3.027 2.108 5.563 9.072 9.779 6.966 4.214 7.506 5.328 6.518 6.922-.99 1.595-16.363-11.366-17.807-5.872-1.443 5.495 15.689 7.09 14.632 10.883-1.057 3.795-12.068-7.18-14.32-2.904-2.253 4.277 15.537 9.303 15.681 9.34 5.747 1.491 20.342 4.649 25.44-2.827\" fill=\"#ffd21e\" fill-rule=\"nonzero\"/><path d=\"m102.584 64c2.159 0 4.088.887 5.433 2.495.832.996 1.702 2.601 1.772 5.005.906-.26 1.776-.405 2.59-.405 2.066 0 3.933.792 5.253 2.23 1.696 1.847 2.449 4.116 2.121 6.387-.156 1.081-.517 2.051-1.057 2.948 1.139.921 1.977 2.204 2.383 3.747.317 1.209.642 3.728-1.056 6.322.108.17.209.346.304.526 1.021 1.938 1.086 4.129.185 6.169-1.367 3.092-4.763 5.528-11.357 8.143-4.103 1.626-7.856 2.666-7.89 2.676-5.424 1.406-10.329 2.121-14.576 2.121-7.805 0-13.393-2.391-16.609-7.105-5.176-7.592-4.436-14.536 2.261-21.23 3.707-3.704 6.171-9.165 6.684-10.364 1.035-3.549 3.771-7.494 8.319-7.494h.001c.383 0 .77.03 1.151.09 1.992.314 3.733 1.46 4.977 3.186 1.343-1.67 2.647-2.998 3.827-3.747 1.779-1.128 3.556-1.7 5.284-1.7m0 5.333c-.68 0-1.511.29-2.427.871-2.844 1.804-8.332 11.237-10.341 14.907-.673 1.229-1.824 1.749-2.86 1.749-2.056 0-3.661-2.044-.188-4.641 5.223-3.908 3.391-10.296.897-10.69-.109-.017-.217-.025-.321-.025-2.267 0-3.267 3.907-3.267 3.907s-2.93 7.36-7.965 12.39c-5.035 5.032-5.295 9.071-1.625 14.452 2.502 3.67 7.293 4.778 12.202 4.778 5.092 0 10.312-1.192 13.238-1.951.144-.037 17.934-5.063 15.681-9.34-.379-.719-1.003-1.007-1.788-1.007-3.173 0-8.945 4.723-11.427 4.723-.554 0-.945-.236-1.105-.812-1.057-3.793 16.076-5.388 14.632-10.883-.255-.972-.945-1.366-1.916-1.365-4.193 0-13.601 7.375-15.573 7.375-.151 0-.259-.044-.318-.138-.988-1.594-.446-2.708 6.518-6.922 6.964-4.216 11.852-6.752 9.072-9.779-.32-.349-.774-.504-1.324-.504-4.228.001-14.218 9.092-14.218 9.092s-2.696 2.804-4.326 2.804c-.375 0-.694-.148-.91-.513-1.156-1.95 10.738-10.963 11.408-14.682.455-2.52-.318-3.796-1.749-3.796\" fill=\"#ffac03\" fill-rule=\"nonzero\"/><path d=\"m74.487 102.253c-3.669-5.381-3.409-9.42 1.625-14.452 5.035-5.03 7.966-12.39 7.966-12.39s1.094-4.275 3.588-3.882c2.493.394 4.324 6.782-.899 10.69-5.223 3.906 1.04 6.561 3.049 2.892 2.01-3.67 7.496-13.103 10.342-14.907 2.844-1.804 4.846-.793 4.176 2.925-.671 3.719-12.566 12.732-11.408 14.683 1.157 1.949 5.236-2.292 5.236-2.292s12.762-11.615 15.541-8.588-2.108 5.563-9.072 9.779c-6.965 4.214-7.505 5.328-6.517 6.922.989 1.595 16.362-11.366 17.806-5.872 1.443 5.495-15.689 7.09-14.632 10.883 1.058 3.795 12.068-7.18 14.32-2.904 2.254 4.277-15.537 9.303-15.681 9.34-5.747 1.491-20.341 4.649-25.44-2.827\" fill=\"#ffd21e\" fill-rule=\"nonzero\"/></svg>\n\\ No newline at end of file\ndiff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml\nnew file mode 100644\nindex 000000000..20f963aae\n--- /dev/null\n+++ b/docs/source/_toctree.yml\n@@ -0,0 +1,22 @@\n+- sections: \n+ - local: index\n+ title: πŸ€— Accelerate\n+ - local: quicktour\n+ title: Quick tour\n+ - local: installation\n+ title: Installation\n+ title: Get started\n+- sections:\n+ - local: sagemaker\n+ title: Amazon SageMaker\n+ title: Guides\n+- sections:\n+ - local: accelerator\n+ title: Accelerator\n+ - local: launcher\n+ title: Notebook Launcher\n+ - local: kwargs\n+ title: Kwargs Handlers\n+ - local: internal\n+ title: Internals\n+ title: API Reference\ndiff --git a/docs/source/accelerator.mdx b/docs/source/accelerator.mdx\nnew file mode 
100644\nindex 000000000..990b75443\n--- /dev/null\n+++ b/docs/source/accelerator.mdx\n@@ -0,0 +1,41 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerator\n+\n+The [`Accelerator`] is the main class provided by πŸ€— Accelerate. It serves as the main entry point for\n+the API. To quickly adapt your script to work on any kind of setup with πŸ€— Accelerate just:\n+\n+1. Initialize an [`Accelerator`] object (that we will call `accelerator` in the rest of this\n+ page) as early as possible in your script.\n+2. Pass along your model(s), optimizer(s), dataloader(s) to the [`~Accelerator.prepare`] method.\n+3. (Optional but best practice) Remove all the `.cuda()` or `.to(device)` in your code and let the\n+ `accelerator` handle device placement for you.\n+4. Replace the `loss.backward()` in your code by `accelerator.backward(loss)`.\n+5. (Optional, when using distributed evaluation) Gather your predictions and labels before storing them or using them\n+ for metric computation using [`~Accelerator.gather`].\n+\n+This is all that is needed in most cases. For more advanced cases or a nicer experience, here are the functions you\n+should search for and replace with the corresponding methods of your `accelerator`:\n+\n+- `print` statements should be replaced by [`~Accelerator.print`] to be only printed once per\n+ process.\n+- Use [`~Accelerator.is_local_main_process`] for statements that should be executed once per server.\n+- Use [`~Accelerator.is_main_process`] for statements that should be executed once only.\n+- Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing\n+ (useful before a model save for instance).\n+- Use [`~Accelerator.unwrap_model`] to unwrap your model before saving it.\n+- Use [`~Accelerator.save`] instead of `torch.save`.\n+- Use [`~Accelerator.clip_grad_norm_`] instead of `torch.nn.utils.clip_grad_norm_` and\n+ [`~Accelerator.clip_grad_value_`] instead of `torch.nn.utils.clip_grad_value_`.\n+\n+[[autodoc]] Accelerator\ndiff --git a/docs/source/accelerator.rst b/docs/source/accelerator.rst\ndeleted file mode 100644\nindex a09e9c623..000000000\n--- a/docs/source/accelerator.rst\n+++ /dev/null\n@@ -1,43 +0,0 @@\n-.. \n- Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n- Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n- the License. You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n- an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n- specific language governing permissions and limitations under the License.\n-\n-Accelerator\n-=======================================================================================================================\n-\n-The :class:`~accelerate.Accelerator` is the main class provided by πŸ€— Accelerate. It serves at the main entrypoint for\n-the API. To quickly adapt your script to work on any kind of setup with πŸ€— Accelerate juste:\n-\n-1. Initialize an :class:`~accelerate.Accelerator` object (that we will call :obj:`accelerator` in the rest of this\n- page) as early as possible in your script.\n-2. Pass along your model(s), optimizer(s), dataloader(s) to the :meth:`~accelerate.Accelerator.prepare` method.\n-3. (Optional but best practice) Remove all the :obj:`.cuda()` or :obj:`.to(device)` in your code and let the\n- :obj:`accelerator` handle device placement for you.\n-4. Replace the :obj:`loss.backward()` in your code by :obj:`accelerator.backward(loss)`.\n-5. (Optional, when using distributed evaluation) Gather your predictions and labelsbefore storing them or using them\n- for metric computation using :meth:`~accelerate.Accelerator.gather`.\n-\n-This is all what is needed in most cases. For more advanced case or a nicer experience here are the functions you\n-should search for and replace by the corresponding methods of your :obj:`accelerator`:\n-\n-- :obj:`print` statements should be replaced by :meth:`~accelerate.Accelerator.print` to be only printed once per\n- process.\n-- Use :meth:`~accelerate.Accelerator.is_local_main_process` for statements that should be executed once per server.\n-- Use :meth:`~accelerate.Accelerator.is_main_process` for statements that should be executed once only.\n-- Use :meth:`~accelerate.Accelerator.wait_for_everyone` to make sure all processes join that point before continuing\n- (useful before a model save for instance).\n-- Use :meth:`~accelerate.Accelerator.unwrap_model` to unwrap your model before saving it.\n-- Use :meth:`~accelerate.Accelerator.save` instead of :obj:`torch.save`.\n-- Use :meth:`~accelerate.Accelerator.clip_grad_norm_` instead of :obj:`torch.nn.utils.clip_grad_norm_` and\n- :meth:`~accelerate.Accelerator.clip_grad_value_` instead of :obj:`torch.nn.utils.clip_grad_value_`.\n-\n-.. autoclass:: accelerate.Accelerator\n- :members:\ndiff --git a/docs/source/conf.py b/docs/source/conf.py\ndeleted file mode 100644\nindex e151e55dd..000000000\n--- a/docs/source/conf.py\n+++ /dev/null\n@@ -1,210 +0,0 @@\n-# -*- coding: utf-8 -*-\n-#\n-# Configuration file for the Sphinx documentation builder.\n-#\n-# This file does only contain a selection of the most common options. For a\n-# full list see the documentation:\n-# http://www.sphinx-doc.org/en/master/config\n-\n-# -- Path setup --------------------------------------------------------------\n-\n-# If extensions (or modules to document with autodoc) are in another directory,\n-# add these directories to sys.path here. 
If the directory is relative to the\n-# documentation root, use os.path.abspath to make it absolute, like shown here.\n-#\n-import os\n-import sys\n-\n-sys.path.insert(0, os.path.abspath(\"../../src\"))\n-\n-\n-# -- Project information -----------------------------------------------------\n-\n-project = \"accelerate\"\n-copyright = \"2020, The Hugging Face Team, Licenced under the Apache License, Version 2.0\"\n-author = \"huggingface\"\n-\n-# The short X.Y version\n-version = \"0.6.0.dev0\"\n-\n-# -- General configuration ---------------------------------------------------\n-\n-# If your documentation needs a minimal Sphinx version, state it here.\n-#\n-# needs_sphinx = '1.0'\n-\n-# Add any Sphinx extension module names here, as strings. They can be\n-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n-# ones.\n-extensions = [\n- \"sphinx.ext.autodoc\",\n- \"sphinx.ext.extlinks\",\n- \"sphinx.ext.coverage\",\n- \"sphinx.ext.napoleon\",\n- \"recommonmark\",\n- \"sphinx.ext.viewcode\",\n- \"sphinx_markdown_tables\",\n- \"sphinx_copybutton\",\n- \"sphinxext.opengraph\",\n-]\n-\n-# Add any paths that contain templates here, relative to this directory.\n-templates_path = [\"_templates\"]\n-\n-# The suffix(es) of source filenames.\n-# You can specify multiple suffix as a list of string:\n-#\n-source_suffix = [\".rst\", \".md\"]\n-# source_suffix = '.rst'\n-\n-# The master toctree document.\n-master_doc = \"index\"\n-\n-# The language for content autogenerated by Sphinx. Refer to documentation\n-# for a list of supported languages.\n-#\n-# This is also used if you do content translation via gettext catalogs.\n-# Usually you set \"language\" from the command line for these cases.\n-language = None\n-\n-# List of patterns, relative to source directory, that match files and\n-# directories to ignore when looking for source files.\n-# This pattern also affects html_static_path and html_extra_path.\n-exclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n-\n-# The name of the Pygments (syntax highlighting) style to use.\n-pygments_style = None\n-\n-# Remove the prompt when copying examples\n-copybutton_prompt_text = r\">>> |\\.\\.\\. \"\n-copybutton_prompt_is_regexp = True\n-\n-# -- Options for HTML output -------------------------------------------------\n-\n-# The theme to use for HTML and HTML Help pages. See the documentation for\n-# a list of builtin themes.\n-#\n-html_theme = \"sphinx_rtd_theme\"\n-\n-# Theme options are theme-specific and customize the look and feel of a theme\n-# further. For a list of options available for each theme, see the\n-# documentation.\n-#\n-html_theme_options = {\"analytics_id\": \"UA-83738774-2\"}\n-\n-# Configuration for OpenGraph and Twitter Card Tags.\n-# These are responsible for creating nice shareable social images https://ahrefs.com/blog/open-graph-meta-tags/\n-# https://ogp.me/#type_website\n-ogp_image = \"https://huggingface.co/front/thumbnails/docs/accelerate.png\"\n-ogp_description = \"Run your raw PyTorch training script on any kind of device. πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed setting (multi-GPUs, TPUs etc.)\"\n-ogp_description_length = 160\n-\n-ogp_custom_meta_tags = [\n- f'<meta name=\"twitter:image\" content=\"{ogp_image}\">',\n- f'<meta name=\"twitter:description\" content=\"{ogp_description}\">',\n-]\n-\n-\n-# Add any paths that contain custom static files (such as style sheets) here,\n-# relative to this directory. 
They are copied after the builtin static files,\n-# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n-html_static_path = [\"_static\"]\n-\n-# Custom sidebar templates, must be a dictionary that maps document names\n-# to template names.\n-#\n-# The default sidebars (for documents that don't match any pattern) are\n-# defined by theme itself. Builtin themes are using these templates by\n-# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n-# 'searchbox.html']``.\n-#\n-# html_sidebars = {}\n-\n-# This must be the name of an image file (path relative to the configuration\n-# directory) that is the favicon of the docs. Modern browsers use this as\n-# the icon for tabs, windows and bookmarks. It should be a Windows-style\n-# icon file (.ico).\n-html_favicon = \"favicon.ico\"\n-\n-\n-# -- Options for HTMLHelp output ---------------------------------------------\n-\n-# Output file base name for HTML help builder.\n-htmlhelp_basename = \"acceleratedoc\"\n-\n-\n-# -- Options for LaTeX output ------------------------------------------------\n-\n-latex_elements = {\n- # The paper size ('letterpaper' or 'a4paper').\n- #\n- # 'papersize': 'letterpaper',\n- # The font size ('10pt', '11pt' or '12pt').\n- #\n- # 'pointsize': '10pt',\n- # Additional stuff for the LaTeX preamble.\n- #\n- # 'preamble': '',\n- # Latex figure (float) alignment\n- #\n- # 'figure_align': 'htbp',\n-}\n-\n-# Grouping the document tree into LaTeX files. List of tuples\n-# (source start file, target name, title,\n-# author, documentclass [howto, manual, or own class]).\n-latex_documents = [\n- (master_doc, \"accelerate.tex\", \"accelerate Documentation\", \"huggingface\", \"manual\"),\n-]\n-\n-\n-# -- Options for manual page output ------------------------------------------\n-\n-# One entry per manual page. List of tuples\n-# (source start file, name, description, authors, manual section).\n-man_pages = [(master_doc, \"accelerate\", \"accelerate Documentation\", [author], 1)]\n-\n-\n-# -- Options for Texinfo output ----------------------------------------------\n-\n-# Grouping the document tree into Texinfo files. List of tuples\n-# (source start file, target name, title, author,\n-# dir menu entry, description, category)\n-texinfo_documents = [\n- (\n- master_doc,\n- \"accelerate\",\n- \"accelerate Documentation\",\n- author,\n- \"accelerate\",\n- \"One line description of project.\",\n- \"Miscellaneous\",\n- ),\n-]\n-\n-\n-# -- Options for Epub output -------------------------------------------------\n-\n-# Bibliographic Dublin Core info.\n-epub_title = project\n-\n-# The unique identifier of the text. This can be a ISBN number\n-# or the project homepage.\n-#\n-# epub_identifier = ''\n-\n-# A unique identification for the text.\n-#\n-# epub_uid = ''\n-\n-# A list of files that should not be packed into the epub file.\n-epub_exclude_files = [\"search.html\"]\n-\n-\n-def setup(app):\n- app.add_css_file(\"css/huggingface.css\")\n- app.add_css_file(\"css/code-snippets.css\")\n- app.add_js_file(\"js/custom.js\")\n-\n-\n-# -- Extension configuration -------------------------------------------------\ndiff --git a/docs/source/favicon.ico b/docs/source/favicon.ico\ndeleted file mode 100644\nindex 424101de7..000000000\nBinary files a/docs/source/favicon.ico and /dev/null differ\ndiff --git a/docs/source/index.mdx b/docs/source/index.mdx\nnew file mode 100644\nindex 000000000..c41ae4a0e\n--- /dev/null\n+++ b/docs/source/index.mdx\n@@ -0,0 +1,132 @@\n+<!--Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Accelerate\n+\n+Run your *raw* PyTorch training script on any kind of device\n+\n+## Features\n+\n+- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed\n+ setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run\n+ seamlessly on your local machine for debugging or your training environment.\n+\n+- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment then\n+ launch the scripts.\n+\n+\n+## Easy to integrate\n+\n+A traditional training loop in PyTorch looks like this:\n+\n+```python\n+my_model.to(device)\n+\n+for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ loss.backward()\n+ my_optimizer.step()\n+```\n+\n+Changing it to work with πŸ€— Accelerate is really easy and only adds a few lines of code:\n+\n+```diff\n++ from accelerate import Accelerator\n+\n++ accelerator = Accelerator()\n+ # Use the device given by the *accelerator* object.\n++ device = accelerator.device\n+ my_model.to(device)\n+ # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n++ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(\n++ my_model, my_optimizer, my_training_dataloader\n++ )\n+\n+ for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ # Just a small change for the backward instruction\n+- loss.backward()\n++ accelerator.backward(loss)\n+ my_optimizer.step()\n+```\n+\n+and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n+\n+You can even simplify your script a bit by letting πŸ€— Accelerate handle the device placement for you (which is safer,\n+especially for TPU training):\n+\n+```diff\n++ from accelerate import Accelerator\n+\n++ accelerator = Accelerator()\n+- my_model.to(device)\n+ # Pass every important object (model, optimizer, dataloader) to *accelerator.prepare*\n++ my_model, my_optimizer, my_training_dataloader = accelerator.prepare(\n++ my_model, my_optimizer, my_training_dataloader\n++ )\n+\n+ for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+- inputs = inputs.to(device)\n+- targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ # Just a small change for the backward instruction\n+- loss.backward()\n++ accelerator.backward(loss)\n+ my_optimizer.step()\n+```\n+\n+## Script launcher\n+\n+No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training! 
πŸ€—\n+Accelerate comes with a CLI tool that will make your life easier when launching distributed scripts.\n+\n+On your machine(s) just run:\n+\n+```bash\n+accelerate config\n+```\n+\n+and answer the questions asked. This will generate a config file that will be used automatically to properly set the\n+default options when doing\n+\n+```bash\n+accelerate launch my_script.py --args_to_my_script\n+```\n+\n+For instance, here is how you would run the NLP example (from the root of the repo):\n+\n+```bash\n+accelerate launch examples/nlp_example.py\n+```\n+\n+## Supported integrations\n+\n+- CPU only\n+- single GPU\n+- multi-GPU on one node (machine)\n+- multi-GPU on several nodes (machines)\n+- TPU\n+- FP16 with native AMP (apex on the roadmap)\n+- DeepSpeed (experimental support)\ndiff --git a/docs/source/index.rst b/docs/source/index.rst\ndeleted file mode 100644\nindex 384db658a..000000000\n--- a/docs/source/index.rst\n+++ /dev/null\n@@ -1,161 +0,0 @@\n-.. \n- Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n- Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n- the License. You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n- an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n- specific language governing permissions and limitations under the License.\n-\n-Accelerate\n-=======================================================================================================================\n-\n-Run your *raw* PyTorch training script on any kind of device\n-\n-Features\n------------------------------------------------------------------------------------------------------------------------\n-\n-- πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed\n- setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then runs\n- seamlessly on your local machine for debugging or your training environment.\n-\n-- πŸ€— Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment then\n- launch the scripts.\n-\n-\n-Easy to integrate\n------------------------------------------------------------------------------------------------------------------------\n-\n-A traditional training loop in PyTorch looks like this:\n-\n-.. code-block:: python\n-\n- my_model.to(device)\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- loss.backward()\n- my_optimizer.step()\n-\n-Changing it to work with accelerate is really easy and only adds a few lines of code:\n-\n-.. 
code-block:: diff\n-\n- + from accelerate import Accelerator\n-\n- + accelerator = Accelerator()\n- # Use the device given by the `accelerator` object.\n- + device = accelerator.device\n- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to `accelerator.prepare`\n- + my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n- + my_model, my_optimizer, my_training_dataloader\n- + )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n- - loss.backward()\n- + accelerator.backward(loss)\n- my_optimizer.step()\n-\n-and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n-\n-You can even simplify your script a bit by letting πŸ€— Accelerate handle the device placement for you (which is safer,\n-especially for TPU training):\n-\n-.. code-block:: diff\n-\n- + from accelerate import Accelerator\n-\n- + accelerator = Accelerator()\n- - my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to `accelerator.prepare`\n- + my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n- + my_model, my_optimizer, my_training_dataloader\n- + )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- - inputs = inputs.to(device)\n- - targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n- - loss.backward()\n- + accelerator.backward(loss)\n- my_optimizer.step()\n-\n-\n-Script launcher\n------------------------------------------------------------------------------------------------------------------------\n-\n-No need to remember how to use ``torch.distributed.launch`` or to write a specific launcher for TPU training! πŸ€—\n-Accelerate comes with a CLI tool that will make your life easier when launching distributed scripts.\n-\n-On your machine(s) just run:\n-\n-.. code-block:: bash\n-\n- accelerate config\n-\n-and answer the questions asked. This will generate a config file that will be used automatically to properly set the\n-default options when doing\n-\n-.. code-block:: bash\n-\n- accelerate launch my_script.py --args_to_my_script\n-\n-For instance, here is how you would run the NLP example (from the root of the repo):\n-\n-.. code-block:: bash\n-\n- accelerate launch examples/nlp_example.py\n-\n-\n-Supported integrations\n------------------------------------------------------------------------------------------------------------------------\n-\n-- CPU only\n-- single GPU\n-- multi-GPU on one node (machine)\n-- multi-GPU on several nodes (machines)\n-- TPU\n-- FP16 with native AMP (apex on the roadmap)\n-- DeepSpeed (experimental support)\n-\n-.. toctree::\n- :maxdepth: 2\n- :caption: Get started\n-\n- quicktour\n- installation\n-\n-.. toctree::\n- :maxdepth: 2\n- :caption: Guides\n-\n- sagemaker\n-\n-.. 
toctree::\n- :maxdepth: 2\n- :caption: API reference\n-\n- accelerator\n- launcher\n- kwargs\n- internal\ndiff --git a/docs/source/installation.md b/docs/source/installation.mdx\nsimilarity index 99%\nrename from docs/source/installation.md\nrename to docs/source/installation.mdx\nindex ca2f88286..4564ba00c 100644\n--- a/docs/source/installation.md\n+++ b/docs/source/installation.mdx\n@@ -87,7 +87,7 @@ Do note that you have to keep that `accelerate` folder around and not delete it\n \n Now, let's get to the real benefit of this installation approach. Say, you saw some new feature has been just committed into `master`. If you have already performed all the steps above, to update your accelerate repo to include all the latest commits, all you need to do is to `cd` into that cloned repository folder and update the clone to the latest version:\n \n-```\n+```bash\n cd ~/accelerate/\n git pull\n ```\ndiff --git a/docs/source/internal.mdx b/docs/source/internal.mdx\nnew file mode 100644\nindex 000000000..3acaaad87\n--- /dev/null\n+++ b/docs/source/internal.mdx\n@@ -0,0 +1,61 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Internals\n+\n+## Optimizer\n+\n+[[autodoc]] optimizer.AcceleratedOptimizer\n+\n+## DataLoader\n+\n+The main work on your PyTorch `DataLoader` is done by the following function:\n+\n+[[autodoc]] data_loader.prepare_data_loader\n+\n+### DataLoaderShard\n+\n+[[autodoc]] data_loader.DataLoaderShard\n+\n+### BatchSamplerShard\n+\n+[[autodoc]] data_loader.BatchSamplerShard\n+\n+### IterableDatasetShard\n+\n+[[autodoc]] data_loader.IterableDatasetShard\n+\n+## Distributed Config\n+\n+### AcceleratorState\n+\n+[[autodoc]] state.AcceleratorState\n+\n+### DistributedType\n+\n+[[autodoc]] state.DistributedType\n+\n+## Utilities\n+\n+[[autodoc]] utils.extract_model_from_parallel\n+\n+[[autodoc]] utils.gather\n+\n+[[autodoc]] utils.send_to_device\n+\n+[[autodoc]] utils.set_seed\n+\n+[[autodoc]] utils.synchronize_rng_state\n+\n+[[autodoc]] utils.synchronize_rng_states\n+\n+[[autodoc]] utils.wait_for_everyone\ndiff --git a/docs/source/internal.rst b/docs/source/internal.rst\ndeleted file mode 100644\nindex 2d8f0f453..000000000\n--- a/docs/source/internal.rst\n+++ /dev/null\n@@ -1,85 +0,0 @@\n-.. \n- Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n- Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n- the License. You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n- an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n- specific language governing permissions and limitations under the License.\n-\n-Internals\n-=======================================================================================================================\n-\n-\n-Optimizer\n------------------------------------------------------------------------------------------------------------------------\n-\n-.. autoclass:: accelerate.optimizer.AcceleratedOptimizer\n-\n-\n-DataLoader\n------------------------------------------------------------------------------------------------------------------------\n-\n-The main work on your PyTorch :obj:`DataLoader` is done by the following function:\n-\n-.. autofunction:: accelerate.data_loader.prepare_data_loader\n-\n-\n-BatchSamplerShard\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-.. autoclass:: accelerate.data_loader.DataLoaderShard\n- :members:\n-\n-\n-BatchSamplerShard\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-.. autoclass:: accelerate.data_loader.BatchSamplerShard\n- :members:\n-\n-\n-IterableDatasetShard\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-.. autoclass:: accelerate.data_loader.IterableDatasetShard\n- :members:\n-\n-\n-Distributed Config\n------------------------------------------------------------------------------------------------------------------------\n-\n-\n-AcceleratorState\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-.. autoclass:: accelerate.state.AcceleratorState\n- :members:\n-\n-\n-DistributedType\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-.. autoclass:: accelerate.state.DistributedType\n- :members:\n-\n-\n-Utilities\n------------------------------------------------------------------------------------------------------------------------\n-\n-.. autofunction:: accelerate.utils.extract_model_from_parallel\n-\n-.. autofunction:: accelerate.utils.gather\n-\n-.. autofunction:: accelerate.utils.send_to_device\n-\n-.. autofunction:: accelerate.utils.set_seed\n-\n-.. autofunction:: accelerate.utils.synchronize_rng_state\n-\n-.. autofunction:: accelerate.utils.synchronize_rng_states\n-\n-.. autofunction:: accelerate.utils.wait_for_everyone\ndiff --git a/docs/source/kwargs.mdx b/docs/source/kwargs.mdx\nnew file mode 100644\nindex 000000000..04f58af37\n--- /dev/null\n+++ b/docs/source/kwargs.mdx\n@@ -0,0 +1,29 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Kwargs Handlers\n+\n+The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects\n+related to distributed training or mixed precision are created.\n+\n+\n+## DistributedDataParallelKwargs\n+\n+[[autodoc]] DistributedDataParallelKwargs\n+\n+## GradScalerKwargs\n+\n+[[autodoc]] GradScalerKwargs\n+\n+## InitProcessGroupKwargs\n+\n+[[autodoc]] InitProcessGroupKwargs\ndiff --git a/docs/source/kwargs.rst b/docs/source/kwargs.rst\ndeleted file mode 100644\nindex 61f42cd11..000000000\n--- a/docs/source/kwargs.rst\n+++ /dev/null\n@@ -1,30 +0,0 @@\n-.. \n- Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n- Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n- the License. You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n- an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n- specific language governing permissions and limitations under the License.\n-\n-\n-Kwargs Handlers\n-=======================================================================================================================\n-\n-The following objects can be passed to the main :class:`~accelerate.Accelerator` to customize how some PyTorch objects\n-related to distributed training or mixed precision are created.\n-\n-\n-DistributedDataParallelKwargs\n------------------------------------------------------------------------------------------------------------------------\n-\n-.. autoclass:: accelerate.DistributedDataParallelKwargs\n-\n-\n-GradScalerKwargs\n------------------------------------------------------------------------------------------------------------------------\n-\n-.. autoclass:: accelerate.GradScalerKwargs\ndiff --git a/docs/source/launcher.mdx b/docs/source/launcher.mdx\nnew file mode 100644\nindex 000000000..fb672d5ad\n--- /dev/null\n+++ b/docs/source/launcher.mdx\n@@ -0,0 +1,28 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Notebook Launcher\n+\n+Launch your training function inside a notebook. Currently supports launching a training with TPUs on [Google\n+Colab](https://colab.research.google.com/) and [Kaggle kernels](https://www.kaggle.com/code), as well as training on\n+several GPUs (if the machine on which you are running your notebook has them).\n+\n+An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb).\n+\n+<Tip warning={true}>\n+\n+Your `Accelerator` object should only be defined inside the training function. 
This is because the\n+initialization should be done inside the launcher only.\n+\n+</Tip>\n+\n+[[autodoc]] notebook_launcher\ndiff --git a/docs/source/launcher.rst b/docs/source/launcher.rst\ndeleted file mode 100644\nindex 1c2279946..000000000\n--- a/docs/source/launcher.rst\n+++ /dev/null\n@@ -1,29 +0,0 @@\n-.. \n- Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n- Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n- the License. You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n- an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n- specific language governing permissions and limitations under the License.\n-\n-\n-Notebook Launcher\n-=======================================================================================================================\n-\n-Launch your training function inside a notebook. Currently supports launching a training with TPUs on [Google\n-Colab](https://colab.research.google.com/) and [Kaggle kernels](https://www.kaggle.com/code), as well as training on\n-several GPUs (if the machine on which you are running your notebook has them).\n-\n-An example can be found in `this notebook\n-<https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb>`__.\n-\n-.. warning::\n-\n- Your :obj:`Accelerator` object should only be defined inside the training function. This is because the\n- initialization should be done inside the launcher only.\n-\n-.. autofunction:: accelerate.notebook_launcher\ndiff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx\nnew file mode 100644\nindex 000000000..f16047925\n--- /dev/null\n+++ b/docs/source/quicktour.mdx\n@@ -0,0 +1,445 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Quick tour\n+\n+Let's have a look at the πŸ€— Accelerate main features and traps to avoid.\n+\n+## Main use\n+\n+To use πŸ€— Accelerate in your own script, you have to change four things:\n+\n+1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object:\n+\n+```python\n+from accelerate import Accelerator\n+\n+accelerator = Accelerator()\n+```\n+\n+This should happen as early as possible in your training script as it will initialize everything necessary for\n+distributed training. You don't need to indicate the kind of environment you are in (just one machine with a GPU, one\n+machine with several GPUs, several machines with multiple GPUs or a TPU), the library will detect this automatically.\n+\n+2. Remove the call `.to(device)` or `.cuda()` for your model and input data. The `accelerator` object\n+will handle this for you and place all those objects on the right device for you. 
If you know what you're doing, you\n+can leave those `.to(device)` calls but you should use the device provided by the `accelerator` object:\n+`accelerator.device`.\n+\n+To fully deactivate the automatic device placement, pass along `device_placement=False` when initializing your\n+[`Accelerator`].\n+\n+<Tip warning={true}>\n+\n+If you place your objects manually on the proper device, be careful to create your optimizer after putting your\n+model on `accelerator.device` or your training will fail on TPU.\n+\n+</Tip>\n+\n+3. Pass all objects relevant to training (optimizer, model, training dataloader) to the\n+[`~Accelerator.prepare`] method. This will make sure everything is ready for training.\n+\n+```python\n+model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)\n+```\n+\n+In particular, your training dataloader will be sharded across all GPUs/TPU cores available so that each one sees a\n+different portion of the training dataset. Also, the random states of all processes will be synchronized at the\n+beginning of each iteration through your dataloader, to make sure the data is shuffled the same way (if you decided to\n+use `shuffle=True` or any kind of random sampler).\n+\n+<Tip>\n+\n+The actual batch size for your training will be the number of devices used multiplied by the batch size you set in\n+your script: for instance training on 4 GPUs with a batch size of 16 set when creating the training dataloader will\n+train at an actual batch size of 64.\n+\n+</Tip>\n+\n+Alternatively, you can use the option `split_batches=True` when initializing your\n+[`Accelerator`], in which case the batch size will always stay the same, whether you run your\n+script on 1, 2, 4 or 64 GPUs.\n+\n+You should execute this instruction as soon as all objects for training are created, before starting your actual\n+training loop.\n+\n+<Tip warning={true}>\n+\n+Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its\n+length divided by X (since your actual batch size will be multiplied by X), unless you set\n+`split_batches=True`.\n+\n+</Tip>\n+\n+Any instruction using your training dataloader length (for instance if you need the number of total training steps\n+to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].\n+\n+You can send your dataloader to [`~Accelerator.prepare`] on its own, but it's best to send the\n+model and optimizer to [`~Accelerator.prepare`] together.\n+\n+You may or may not want to send your validation dataloader to [`~Accelerator.prepare`], depending on\n+whether you want to run distributed evaluation or not (see below).\n+\n+4. Replace the line `loss.backward()` by `accelerator.backward(loss)`.\n+\n+And you're all set! With all these changes, your script will run on your local machine as well as on multiple GPUs or a\n+TPU! You can either use your favorite tool to launch the distributed training, or you can use the πŸ€— Accelerate\n+launcher.\n+\n+\n+## Distributed evaluation\n+\n+You can perform regular evaluation in your training script, if you leave your validation dataloader out of the\n+[`~Accelerator.prepare`] method. 
In this case, you will need to put the input data on the\n+`accelerator.device` manually.\n+\n+To perform distributed evaluation, send along your validation dataloader to the [`~Accelerator.prepare`]\n+method:\n+\n+```python\n+validation_dataloader = accelerator.prepare(validation_dataloader)\n+```\n+\n+Like for your training dataloader, it will mean that (should you run your script on multiple devices) each device will\n+only see part of the evaluation data. This means you will need to group your predictions together. This is very easy to\n+do with the [`~Accelerator.gather`] method.\n+\n+```python\n+for inputs, targets in validation_dataloader:\n+ predictions = model(inputs)\n+ # Gather all predictions and targets\n+ all_predictions = accelerator.gather(predictions)\n+ all_targets = accelerator.gather(targets)\n+ # Example of use with a *Datasets.Metric*\n+ metric.add_batch(all_predictions, all_targets)\n+```\n+\n+<Tip warning={true}>\n+\n+Like for the training dataloader, passing your validation dataloader through\n+[`~Accelerator.prepare`] may change its length: if you run on X GPUs, it will have its length divided by X\n+(since your actual batch size will be multiplied by X), unless you set `split_batches=True`.\n+\n+Any instruction using your training dataloader length (for instance if you need the number of total training steps\n+to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].\n+\n+</Tip>\n+\n+<Tip warning={true}>\n+\n+The [`~Accelerator.gather`] method requires the tensors to be all the same size on each process. If\n+you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in\n+a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensor to the\n+biggest size across processes.\n+\n+</Tip>\n+\n+## Launching your distributed script\n+\n+You can use the regular commands to launch your distributed training (like `torch.distributed.launch` for\n+PyTorch); they are fully compatible with πŸ€— Accelerate. The only caveat here is that πŸ€— Accelerate uses the environment\n+to determine all useful information, so `torch.distributed.launch` should be used with the flag `--use_env`.\n+\n+πŸ€— Accelerate also provides a CLI tool that unifies all launchers, so you only have to remember one command. To use it,\n+just run\n+\n+```bash\n+accelerate config\n+```\n+\n+on your machine and reply to the questions asked. This will save a *default_config.yaml* file in your cache folder for\n+πŸ€— Accelerate. That cache folder is (with decreasing order of priority):\n+\n+- The content of your environment variable `HF_HOME` suffixed with *accelerate*.\n+- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with\n+ *huggingface/accelerate*.\n+- If this does not exist either, the folder *~/.cache/huggingface/accelerate*\n+\n+You can also specify with the flag `--config_file` the location of the file you want to save.\n+\n+Once this is done, you can test that everything is going well on your setup by running\n+\n+```bash\n+accelerate test\n+```\n+\n+This will launch a short script that will test the distributed environment. 
If it runs fine, you are ready for the next\n+step!\n+\n+Note that if you specified a location for the config file in the previous step, you need to pass it here as well:\n+\n+```bash\n+accelerate test --config_file path_to_config.yaml\n+```\n+\n+Now that this is done, you can run your script with the following command:\n+\n+```bash\n+accelerate launch path_to_script.py --args_for_the_script\n+```\n+\n+If you stored the config file in a non-default location, you can indicate it to the launcher like this:\n+\n+```bash\n+accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script\n+```\n+\n+You can also override any of the arguments determined by your config file, see TODO: insert ref here.\n+\n+\n+## Launching training from a notebook\n+\n+In Accelerate 0.3.0, a new [`notebook_launcher`] has been introduced to help you launch your training\n+function from a notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training\n+on several GPUs (if the machine on which you are running your notebook has them).\n+\n+Just define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a\n+cell with the following code:\n+\n+```python\n+from accelerate import notebook_launcher\n+\n+notebook_launcher(training_function)\n+```\n+\n+<Tip warning={true}>\n+\n+Your `Accelerator` object should only be defined inside the training function. This is because the\n+initialization should be done inside the launcher only.\n+\n+</Tip>\n+\n+## Training on TPU\n+\n+If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs\n+will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer\n+step). This is why your first step of training will always be very long as building and compiling this graph for\n+optimizations takes some time.\n+\n+The good news is that this compilation will be cached so the second step and all the following will be much faster. The\n+bad news is that it only applies if all of your steps do exactly the same operations, which implies:\n+\n+- having all tensors of the same length in all your batches\n+- having static code (i.e., not a for loop of length that could change from step to step)\n+\n+Having any of the things above change between two steps will trigger a new compilation which will, once again, take a\n+lot of time. 
In practice, that means you must take special care to have all your tensors in your inputs of the same\n+shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layers with for loops that\n+have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.\n+\n+To introduce special behavior in your script for TPUs, you can check the `distributed_type` of your\n+`accelerator`:\n+\n+```python docstyle-ignore\n+from accelerate import DistributedType\n+\n+if accelerator.distributed_type == DistributedType.TPU:\n+ # do something of static shape\n+else:\n+ # go crazy and be dynamic\n+```\n+\n+The [NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) shows an example in\n+a situation with dynamic padding.\n+\n+One last thing to pay close attention to: if your model has tied weights (such as language models which tie the weights\n+of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you\n+passed your model to [`~Accelerator.prepare`]) will break the tying. You will need to retie the weights\n+after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in\n+the Transformers repository.\n+\n+\n+## Other caveats\n+\n+We list here all smaller issues you could have in your script conversion and how to resolve them.\n+\n+### Execute a statement only on one process\n+\n+Some of your instructions only need to run for one process on a given server: for instance a data download or a log\n+statement. To do this, wrap the statement in a test like this:\n+\n+```python docstyle-ignore\n+if accelerator.is_local_main_process:\n+ # Is executed once per server\n+```\n+\n+Another example is progress bars: to avoid having multiple progress bars in your output, you should only display one on\n+the local main process:\n+\n+```python\n+from tqdm.auto import tqdm\n+\n+progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n+```\n+\n+The *local* means per machine: if you are running your training on two servers with several GPUs, the instruction will\n+be executed once on each of those servers. If you need to execute something only once for all processes (and not per\n+machine), for instance uploading the final model to the πŸ€— model hub, wrap it in a test like this:\n+\n+```python docstyle-ignore\n+if accelerator.is_main_process:\n+ # Is executed once only\n+```\n+\n+For printing statements you only want executed once per machine, you can just replace the `print` function by\n+`accelerator.print`.\n+\n+\n+### Defer execution\n+\n+When you run your usual script, instructions are executed in order. Using πŸ€— Accelerate to deploy your script on several\n+GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be\n+faster than others.\n+\n+You might need to wait for all processes to have reached a certain point before executing a given instruction. For\n+instance, you shouldn't save a model before being sure every process is done with training. 
To do this, just write the\n+following line in your code:\n+\n+```python\n+accelerator.wait_for_everyone()\n+```\n+\n+This instruction will block all the processes that arrive there first until all the other processes have reached that\n+point (if you run your script on just one GPU or CPU, this won't do anything).\n+\n+\n+### Saving/loading a model\n+\n+Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that\n+point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going\n+through the [`~Accelerator.prepare`] method, your model may have been placed inside a bigger model,\n+which deals with the distributed training. This in turn means that saving your model state dictionary without taking\n+any precaution will take that potential extra layer into account, and you will end up with weights you can't load back\n+in your base model.\n+\n+This is why it's recommended to *unwrap* your model first. Here is an example:\n+\n+```python\n+accelerator.wait_for_everyone()\n+unwrapped_model = accelerator.unwrap_model(model)\n+accelerator.save(unwrapped_model.state_dict(), filename)\n+```\n+\n+If your script contains logic to load a checkpoint, we also recommend you load your weights in the unwrapped model\n+(this is only useful if you use the load function after making your model go through\n+[`~Accelerator.prepare`]). Here is an example:\n+\n+```python\n+unwrapped_model = accelerator.unwrap_model(model)\n+unwrapped_model.load_state_dict(torch.load(filename))\n+```\n+\n+Note that since all the model parameters are references to tensors, this will load your weights inside `model`.\n+\n+### Gradient clipping\n+\n+If you are using gradient clipping in your script, you should replace the calls to\n+`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with `accelerator.clip_grad_norm_`\n+and `accelerator.clip_grad_value_` respectively.\n+\n+\n+### Mixed Precision training\n+\n+If you are running your training in Mixed Precision with Accelerate, you will get the best result with your loss being\n+computed inside your model (like in Transformer models for instance). Every computation outside of the model will be\n+executed in full precision (which is generally what you want for loss computation, especially if it involves a\n+softmax). However, you might want to put your loss computation inside the *accelerator.autocast* context manager:\n+\n+```python\n+with accelerator.autocast():\n+ loss = complex_loss_function(outputs, target)\n+```\n+\n+Another caveat with Mixed Precision training is that the gradient will skip a few updates at the beginning and\n+sometimes during training: because of the dynamic loss scaling strategy, there are points during training where the\n+gradients have overflowed, and the loss scaling factor is reduced to avoid this happening again at the next step.\n+\n+This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may\n+have an impact when you have very little training data, or if the first learning rate values of your scheduler are very\n+important. In this case, you can skip the learning rate scheduler updates when the optimizer step was not done like\n+this:\n+\n+```python\n+if not accelerator.optimizer_step_was_skipped:\n+ lr_scheduler.step()\n+```\n+\n+### DeepSpeed\n+\n+DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight\n+breaking changes. 
In particular, πŸ€— Accelerate does not yet support a DeepSpeed config you have written yourself; this\n+will be added in a future version.\n+\n+One main caveat for the DeepSpeed integration is that the DeepSpeed launcher always passes a `local_rank` variable to\n+the training script, so your training script should accept it (whether you launch training with the DeepSpeed launcher\n+or `accelerate launch`).\n+\n+<Tip warning={true}>\n+\n+The [`notebook_launcher`] does not support the DeepSpeed integration yet.\n+\n+</Tip>\n+\n+## Internal mechanism\n+\n+Internally, the library works by first analyzing the environment in which the script is launched to determine which\n+kind of distributed setup is used, how many different processes there are and which one the current script is in. All\n+that information is stored in the [`~AcceleratorState`].\n+\n+This class is initialized the first time you instantiate an [`Accelerator`] and performs any\n+specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of\n+[`~state.AcceleratorState`].\n+\n+Then, when calling [`~Accelerator.prepare`], the library:\n+\n+- wraps your model(s) in the container adapted for the distributed setup,\n+- wraps your optimizer(s) in a [`~optimizer.AcceleratedOptimizer`],\n+- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`].\n+\n+While the model(s) and optimizer(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly\n+because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the\n+library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other\n+`num_processes` batches.\n+\n+The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:\n+\n+- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any\n+ randomization (like shuffling) is done the exact same way across processes.\n+- it puts the batches on the proper device before yielding them (unless you have opted out of\n+ `device_placement=True`).\n+\n+The random number generator synchronization will by default synchronize:\n+\n+- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6\n+- the main random number generator in PyTorch <=1.5.1\n+\n+You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main\n+[`Accelerator`]. 
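For instance, here is a minimal sketch of passing that argument (assuming `\"generator\"` is one of the accepted string values, matching the default behavior listed above):\n+\n+```python\n+from accelerate import Accelerator\n+\n+# only synchronize the local `generator` attribute of the samplers across processes\n+accelerator = Accelerator(rng_types=[\"generator\"])\n+```\n+\n+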
In PyTorch >= 1.6, it is recommended to rely on local `generator` to avoid\n+setting the same seed in the main random number generator in all processes.\n+\n+<Tip warning={true}>\n+\n+Synchronization the main torch (or CUDA or XLA) random number generator will affect any other potential random\n+artifacts you could have in your dataset (like random data augmentation) in the sense all processes will get the\n+same random numbers from the torch random modules (so will apply the same random data augmentation if it's\n+controlled by torch).\n+\n+</Tip>\n+\n+<Tip>\n+\n+The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local\n+`torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler`, as an example.\n+\n+</Tip>\n+\n+See more details about the internal in the [Internals page](internal).\ndiff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\ndeleted file mode 100644\nindex 686d85ce1..000000000\n--- a/docs/source/quicktour.rst\n+++ /dev/null\n@@ -1,450 +0,0 @@\n-.. \n- Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n- Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n- the License. You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n- an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n- specific language governing permissions and limitations under the License.\n-\n-Quick tour\n-=======================================================================================================================\n-\n-Let's have a look at a look at πŸ€— Accelerate main features and traps to avoid.\n-\n-Main use\n------------------------------------------------------------------------------------------------------------------------\n-\n-To use πŸ€— Accelerate in your own script, you have to change four things:\n-\n-1. Import the :class:`~accelerate.Accelerator` main class instantiate one in an :obj:`accelerator` object:\n-\n-.. code-block:: python\n-\n- from accelerate import Accelerator\n-\n- accelerator = Accelerator()\n-\n-This should happen as early as possible in your training script as it will initialize everything necessary for\n-distributed training. You don't need to indicate the kind of environment you are in (just one machine with a GPU, one\n-match with several GPUs, several machines with multiple GPUs or a TPU), the library will detect this automatically.\n-\n-2. Remove the call :obj:`.to(device)` or :obj:`.cuda()` for your model and input data. The :obj:`accelerator` object\n-will handle this for you and place all those objects on the right device for you. If you know what you're doing, you\n-can leave those :obj:`.to(device)` calls but you should use the device provided by the :obj:`accelerator` object:\n-:obj:`accelerator.device`.\n-\n-To fully deactivate the automatic device placement, pass along :obj:`device_placement=False` when initializing your\n-:class:`~accelerate.Accelerator`.\n-\n-.. Warning::\n-\n- If you place your objects manually on the proper device, be careful to create your optimizer after putting your\n- model on :obj:`accelerator.device` or your training will fail on TPU.\n-\n-3. 
Pass all objects relevant to training (optimizer, model, training dataloader) to the\n-:meth:`~accelerate.Accelerator.prepare` method. This will make sure everything is ready for training.\n-\n-.. code-block:: python\n-\n- model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)\n-\n-In particular, your training dataloader will be sharded accross all GPUs/TPU cores available so that each one sees a\n-different portion of the training dataset. Also, the random states of all processes will be synchronized at the\n-beginning of each iteration through your dataloader, to make sure the data is shuffled the same way (if you decided to\n-use :obj:`shuffle=True` or any kind of random sampler).\n-\n-.. Note::\n-\n- The actual batch size for your training will be the number of devices used multiplied by the batch size you set in\n- your script: for instance training on 4 GPUs with a batch size of 16 set when creating the training dataloader will\n- train at an actual batch size of 64.\n-\n- Alternatively, you can use the option :obj:`split_batches=True` when creating initializing your\n- :class:`~accelerate.Accelerator`, in which case the batch size will always stay the same, whether your run your\n- script on 1, 2, 4 or 64 GPUs.\n-\n-You should execute this instruction as soon as all objects for training are created, before starting your actual\n-training loop.\n-\n-.. Warning::\n-\n- Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its\n- length divided by X (since your actual batch size will be multiplied by X), unless you set\n- :obj:`split_batches=True`.\n-\n- Any instruction using your training dataloader length (for instance if you need the number of total training steps\n- to create a learning rate scheduler) should go after the call to :meth:`~accelerate.Accelerator.prepare`.\n-\n-You can perfectly send your dataloader to :meth:`~accelerate.Accelerator.prepare` on its own, but it's best to send the\n-model and optimizer to :meth:`~accelerate.Accelerator.prepare` together.\n-\n-You may or may not want to send your validation dataloader to :meth:`~accelerate.Accelerator.prepare`, depending on\n-whether you want to run distributed evaluation or not (see below).\n-\n-4. Replace the line :obj:`loss.backward()` by :obj:`accelerator.backward(loss)`.\n-\n-And you're all set! With all these changes, your script will run on your local machine as well as on multiple GPUs or a\n-TPU! You can either use your favorite tool to launch the distributed training, or you can use the πŸ€— Accelerate\n-launcher.\n-\n-\n-Distributed evaluation\n------------------------------------------------------------------------------------------------------------------------\n-\n-You can perform regular evaluation in your training script, if you leave your validation dataloader out of the\n-:meth:`~accelerate.Accelerator.prepare` method. In this case, you will need to put the input data on the\n-:obj:`accelerator.device` manually.\n-\n-To perform distributed evaluation, send along your validation dataloader to the :meth:`~accelerate.Accelerator.prepare`\n-method:\n-\n-.. code-block:: python\n-\n- validation_dataloader = accelerator.prepare(validation_dataloader)\n-\n-Like for your training dataloader, it will mean that (should you run your script on multiple devices) each device will\n-only see part of the evaluation data. This means you will need to group your predictions together. 
This is very easy to\n-do with the :meth:`~accelerate.Accelerator.gather` method.\n-\n-.. code-block:: python\n-\n- for inputs, targets in validation_dataloader:\n- predictions = model(inputs)\n- # Gather all predictions and targets\n- all_predictions = accelerator.gather(predictions)\n- all_targets = accelerator.gather(targets)\n- # Example of use with a `Datasets.Metric`\n- metric.add_batch(all_predictions, all_targets)\n-\n-\n-.. Warning::\n-\n- Like for the training dataloader, passing your validation dataloader through\n- :meth:`~accelerate.Accelerator.prepare` may change its: if you run on X GPUs, it will have its length divided by X\n- (since your actual batch size will be multiplied by X), unless you set :obj:`split_batches=True`.\n-\n- Any instruction using your training dataloader length (for instance if you need the number of total training steps\n- to create a learning rate scheduler) should go after the call to :meth:`~accelerate.Accelerator.prepare`.\n-\n-.. Warning::\n-\n- The :meth:`~accelerate.Accelerator.gather` method requires the tensors to be all the same size on each process. If\n- you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in\n- a batch), you should use the :meth:`~accelerate.Accelerator.pad_across_processes` method to pad you tensor to the\n- biggest size across processes.\n-\n-\n-Launching your distributed script\n------------------------------------------------------------------------------------------------------------------------\n-\n-You can use the regular commands to launch your distributed training (like :obj:`torch.distributed.launch` for\n-PyTorch), they are fully compatible with πŸ€— Accelerate. The only caveat here is that πŸ€— Accelerate uses the environment\n-to determine all useful information, so :obj:`torch.distributed.launch` should be used with the flag :obj:`--use_env`.\n-\n-πŸ€— Accelerate also provides a CLI tool that unifies all launcher, so you only have to remember one command. To use it,\n-just run\n-\n-.. code-block:: bash\n-\n- accelerate config\n-\n-on your machine and reply to the questions asked. This will save a `default_config.yaml` file in your cache folder for\n-πŸ€— Accelerate. That cache folder is (with decreasing order of priority):\n-\n- - The content of your environment variable ``HF_HOME`` suffixed with `accelerate`.\n- - If it does not exist, the content of your environment variable ``XDG_CACHE_HOME`` suffixed with\n- `huggingface/accelerate`.\n- - If this does not exist either, the folder `~/.cache/huggingface/accelerate`\n-\n-You can also specify with the flag :obj:`--config_file` the location of the file you want to save.\n-\n-Once this is done, you can test everything is going well on your setup by running\n-\n-.. code-block:: bash\n-\n- accelerate test\n-\n-\n-This will launch a short script that will test the distributed environment. If it runs fine, you are ready for the next\n-step!\n-\n-Note that if you specified a location for the config file in the previous step, you need to pass it here as well:\n-\n-.. code-block:: bash\n-\n- accelerate test --config_file path_to_config.yaml\n-\n-\n-Now that this is done, you can run your script with the following command:\n-\n-.. code-block:: bash\n-\n- accelerate launch path_to_script.py --args_for_the_script\n-\n-\n-If you stored the config file in a non-default location, you can indicate it to the launcher like his:\n-\n-.. 
code-block:: bash\n-\n- accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script\n-\n-You can also override any of the arguments determined by your config file, see TODO: insert ref here.\n-\n-\n-Launching training from a notebook\n------------------------------------------------------------------------------------------------------------------------\n-\n-In Accelerate 0.3.0, a new :class:`~accelerate.notebook_launcher` has been introduced to help you launch your training\n-function from a notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training\n-on several GPUs (if the machine on which you are running your notebook has them).\n-\n-Just define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a\n-cell with the following code:\n-\n-.. code-block::\n-\n- from accelerate import notebook_launcher\n-\n- notebook_launcher(training_function)\n-\n-.. warning::\n-\n- Your :obj:`Accelerator` object should only be defined inside the training function. This is because the\n- initialization should be done inside the launcher only.\n-\n-\n-Training on TPU\n------------------------------------------------------------------------------------------------------------------------\n-\n-If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs\n-will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer\n-step). This is why your first step of training will always be very long as building and compiling this graph for\n-optimizations takes some time.\n-\n-The good news is that this compilation will be cached so the second step and all the following will be much faster. The\n-bas news is that it only applies if all of your steps do exactly the same operations, which implies:\n-\n-- having all tensors of the same length in all your lengths\n-- having static code (i.e., not a for loop of length that could change from step to step)\n-\n-Having any of the things above change between two steps will trigger a new compilation which will, once again, take a\n-lot of time. In practice, that means you must take special care to have all your tensors in your inputs of the same\n-shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layer with for loops that\n-have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.\n-\n-To introduce special behavior in your script for TPUs you can check the :obj:`distributed_type` of your\n-:obj:`accelerator`:\n-\n-.. code-block:: python\n-\n- from accelerate import DistributedType\n-\n- if accelerator.distributed_type == DistributedType.TPU:\n- # do something of static shape\n- else:\n- # go crazy and be dynamic\n-\n-The `NLP example <https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py>`__ shows an example in\n-situation with dynamic padding.\n-\n-One last thing to pay close attnetion to: if your model has tied weights (such as language models which tie the weights\n-of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you\n-passed your model to :meth:`~accelerate.Accelerator.prepare`) will break the tying. You will need to retie the weights\n-after. 
You can find an example of this in the `run_clm_no_trainer\n-<https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py>`__ script in\n-the Transformers repository.\n-\n-\n-Other caveats\n------------------------------------------------------------------------------------------------------------------------\n-\n-We list here all smaller issues you could have in your script conversion and how to resolve them.\n-\n-Execute a statement only on one processes\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-Some of your instructions only need to run for one process on a given server: for instance a data download or a log\n-statement. To do this, wrap the statement in a test like this:\n-\n-.. code-block:: python\n-\n- if accelerator.is_local_main_process:\n- # Is executed once per server\n-\n-Another example is progress bars: to avoid having multiple progress bars in your output, you should only display one on\n-the local main process:\n-\n-.. code-block:: python\n-\n- from tqdm.auto import tqdm\n-\n- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n-\n-The `local` means per machine: if you are running your training on two servers with several GPUs, the instruction will\n-be executed once on each of those servers. If you need to execute something only once for all processes (and not per\n-machine) for instance, uploading the final model to the πŸ€— model hub, wrap it in a test like this:\n-\n-.. code-block:: python\n-\n- if accelerator.is_main_process:\n- # Is executed once only\n-\n-For printing statements you only want executed once per machine, you can just replace the :obj:`print` function by\n-:obj:`accelerator.print`.\n-\n-\n-Defer execution\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-When you run your usual script, instructions are executed in order. Using πŸ€— Accelerate to deploy your script on several\n-GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be\n-faster than others.\n-\n-You might need to wait for all processes to have reached a certain point before executing a given instruction. For\n-instance, you shouldn't save a model before being sure every process is done with training. To do this, just write the\n-following line in your code:\n-\n-.. code-block::\n-\n- accelerator.wait_for_everyone()\n-\n-This instruction will block all the processes that arrive them first until all the other processes have reached that\n-point (if you run your script on just one GPU or CPU, this wont' do anything).\n-\n-\n-Saving/loading a model\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that\n-point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going\n-through the :meth:`~accelerate.Accelerator.prepare` method, your model may have been placed inside a bigger model,\n-which deals with the distributed training. 
This in turn means that saving your model state dictionary without taking\n-any precaution will take that potential extra layer into account, and you will end up with weights you can't load back\n-in your base model.\n-\n-This is why it's recommended to `unwrap` your model first. Here is an example:\n-\n-.. code-block::\n-\n- accelerator.wait_for_everyone()\n- unwrapped_model = accelerator.unwrap_model(model)\n- accelerator.save(unwrapped_model.state_dict(), filename)\n-\n-If your script contains a logic to load checkpoint, we also recommend you load your weights in the unwrapped model\n-(this is only useful if you use the load function after making your model go through\n-:meth:`~accelerate.Accelerator.prepare`). Here is an example:\n-\n-.. code-block::\n-\n- unwrapped_model = accelerator.unwrap_model(model)\n- unwrapped_model.load_state_dict(torch.load(filename))\n-\n-Note that since all the model parameters are references to tensors, this will load your weights inside :obj:`model`.\n-\n-Gradient clipping\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-If you are using gradient clipping in your script, you should replace the calls to\n-:obj:`torch.nn.utils.clip_grad_norm_` or :obj:`torch.nn.utils.clip_grad_value_` with :obj:`accelerator.clip_grad_norm_`\n-and :obj:`accelerator.clip_grad_value_` respectively.\n-\n-\n-Mixed Precision training\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-If you are running your training in Mixed Precision with Accelerate, you will get the best result with your loss being\n-computed inside your model (like in Transformer models for instance). Every computation outside of the model will be\n-executed in full precision (which is generally what you want for loss computation, expecially if it involves a\n-softmax). However you might want to put your loss computation inside the `accelerator.autocast` context manager:\n-\n-.. code-block::\n-\n- with accelerator.autocast():\n- loss = complex_loss_function(outputs, target):\n-\n-Another caveat with Mixed Precision training is that the gradient will skip a few updates at the beginning and\n-sometimes during training: because of the dynamic loss scaling strategy, there are points during training where the\n-gradients have overflown, and the loss scaling factor is reduced to avoid this happening again at the next step.\n-\n-This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may\n-have an impact when you have very little training data, or if the first learning rate values of your scheduler are very\n-important. In this case, you can skip the learning rate scheduler updates when the optimizer step was not done like\n-this:\n-\n-.. code-block::\n-\n- if not accelerator.optimizer_step_was_skipped:\n- lr_scheduler.step()\n-\n-\n-DeepSpeed\n-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-\n-DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight\n-breaking changes. 
In particular, πŸ€— Accelerate does not support DeepSpeed config you have written yourself yet, this\n-will be added in a next version.\n-\n-One main caveat for the DeepSpeed integration is that the DeepSpeed launcher always passes a ``local_rank`` variable to\n-the training script, so your training script should accept it (whether you launch training with the DeepSpeed launcher\n-or ``accelerate launch``).\n-\n-.. Warning::\n-\n- The :func:`~accelerate.notebook_launcher` does not support the DeepSpeed integration yet.\n-\n-\n-Internal mechanism\n------------------------------------------------------------------------------------------------------------------------\n-\n-Internally, the library works by first analyzing the environment in which the script is launched to determine which\n-kind of distributed setup is used, how many different processes there are and which one the current script is in. All\n-that information is stored in the :class:`~accelerate.state.AcceleratorState`.\n-\n-This class is initialized the first time you instantiate a :class:`~accelerate.Accelerator` as well as performing any\n-specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of\n-:class:`~accelerate.state.AcceleratorState`.\n-\n-Then, when calling :meth:`~accelerate.Accelerator.prepare`, the library:\n-\n-- wraps your model(s) in the container adapted for the distributed setup,\n-- wraps your optimizer(s) in a :class:`~accelerate.optimizer.AcceleratedOptimizer`,\n-- creates a new version of your dataloader(s) in a :class:`~accelerate.data_loader.DataLoaderShard`.\n-\n-While the model(s) and optimizer(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly\n-because PyTorch does not let the user change the :obj:`batch_sampler` of a dataloader once it's been created and the\n-library handles the sharding of your data between processes by changing that :obj:`batch_sampler` to yield every other\n-:obj:`num_processes` batches.\n-\n-The :class:`~accelerate.data_loader.DataLoaderShard` subclasses :obj:`DataLoader` to add the following functionality:\n-\n-- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any\n- randomization (like shuffling) is done the exact same way across processes.\n-- it puts the batches on the proper device before yielding them (unless you have opted out of\n- :obj:`device_placement=True`).\n-\n-The random number generator synchronization will by default synchronize:\n-\n-- the :obj:`generator` attribute of a given sampler (like the PyTorch :obj:`RandomSampler`) for PyTorch >= 1.6\n-- the main random number generator in PyTorch <=1.5.1\n-\n-You can choose which random number generator(s) to synchronize with the :obj:`rng_types` argument of the main\n-:class:`~accelerate.Accelerator`. In PyTorch >= 1.6, it is recommended to rely on local :obj:`generator` to avoid\n-setting the same seed in the main random number generator in all processes.\n-\n-.. Warning::\n-\n- Synchronization the main torch (or CUDA or XLA) random number generator will affect any other potential random\n- artifacts you could have in your dataset (like random data augmentation) in the sense all processes will get the\n- same random numbers from the torch random modules (so will apply the same random data augmentation if it's\n- controlled by torch).\n-\n-.. 
Note::\n-\n- The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local\n- :obj:`torch.Generator` object (in PyTorch >= 1.6), see the traditional :obj:`RandomSampler`, as an example.\n-\n-See more details about the internal in the :doc:`Internals page <internal>`.\ndiff --git a/docs/source/sagemaker.mdx b/docs/source/sagemaker.mdx\nnew file mode 100644\nindex 000000000..d55ccd28b\n--- /dev/null\n+++ b/docs/source/sagemaker.mdx\n@@ -0,0 +1,150 @@\n+<!--Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+specific language governing permissions and limitations under the License.\n+-->\n+\n+# Amazon SageMaker\n+\n+Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to\n+make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).\n+\n+## Getting Started\n+\n+### Setup & Installation\n+\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not\n+have an AWS account yet, learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html).\n+\n+After you have your AWS account, you need to install the `sagemaker` sdk for πŸ€— Accelerate with:\n+\n+```bash\n+pip install \"accelerate[sagemaker]\" --upgrade\n+```\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. πŸ€—\n+Accelerate is not in the DLC yet (will soon be added!), so to use it within Amazon SageMaker you need to create a\n+`requirements.txt` in the same directory where your training script is located and add it as a dependency.\n+\n+```\n+accelerate\n+```\n+\n+You should also add any other dependencies you have to this `requirements.txt`.\n+\n+\n+### Configure πŸ€— Accelerate\n+\n+You can configure the launch configuration for Amazon SageMaker the same as you do for non-SageMaker training jobs with\n+the πŸ€— Accelerate CLI.\n+\n+```bash\n+accelerate config\n+# In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1\n+```\n+\n+πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.\n+\n+<Tip>\n+\n+πŸ€— Accelerate does not save any of your credentials.\n+\n+</Tip>\n+\n+### Prepare a πŸ€— Accelerate fine-tuning script\n+\n+The training script is very similar to a training script you might run outside of SageMaker, but to save your model\n+after training you need to specify either `/opt/ml/model` or use `os.environ[\"SM_MODEL_DIR\"]` as your save\n+directory. After training, artifacts in this directory are uploaded to S3.\n+\n+\n+```diff\n+- torch.save('/opt/ml/model')\n++ accelerator.save('/opt/ml/model')\n+```\n+\n+<Tip warning={true}>\n+\n+SageMaker doesn’t support argparse actions. 
If you want to use, for example, boolean hyperparameters, you need to\n+specify type as bool in your script and provide an explicit True or False value for this hyperparameter. [[REF]](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script).\n+\n+</Tip>\n+\n+### Launch Training\n+\n+You can launch your training with πŸ€— Accelerate CLI with\n+\n+```\n+accelerate launch path_to_script.py --args_to_the_script\n+```\n+\n+This will launch your training script using your configuration. The only thing you have to do is provide all the\n+arguments needed by your training script as named arguments.\n+\n+**Examples**\n+\n+<Tip>\n+\n+If you run one of the example scripts, don't forget to add `accelerator.save('/opt/ml/model')` to it.\n+\n+</Tip>\n+\n+```bash\n+accelerate launch ./examples/sagemaker_example.py\n+```\n+\n+Outputs:\n+\n+```\n+Configuring Amazon SageMaker environment\n+Converting Arguments to Hyperparameters\n+Creating Estimator\n+2021-04-08 11:56:50 Starting - Starting the training job...\n+2021-04-08 11:57:13 Starting - Launching requested ML instancesProfilerReport-1617883008: InProgress\n+.........\n+2021-04-08 11:58:54 Starting - Preparing the instances for training.........\n+2021-04-08 12:00:24 Downloading - Downloading input data\n+2021-04-08 12:00:24 Training - Downloading the training image..................\n+2021-04-08 12:03:39 Training - Training image download completed. Training in progress..\n+........\n+epoch 0: {'accuracy': 0.7598039215686274, 'f1': 0.8178438661710037}\n+epoch 1: {'accuracy': 0.8357843137254902, 'f1': 0.882249560632689}\n+epoch 2: {'accuracy': 0.8406862745098039, 'f1': 0.8869565217391304}\n+........\n+2021-04-08 12:05:40 Uploading - Uploading generated training model\n+2021-04-08 12:05:40 Completed - Training job completed\n+Training seconds: 331\n+Billable seconds: 331\n+You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04-08-11-56-47-108/output/model.tar.gz\n+```\n+\n+## Advanced Features\n+\n+### Distributed Training: Data Parallelism\n+\n+*currently in development, will be supported soon.*\n+\n+### Distributed Training: Model Parallelism\n+\n+*currently in development, will be supported soon.*\n+\n+### Python packages and dependencies\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. If you\n+want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages\n+will be installed before your training script is started.\n+\n+### Remote scripts: Use scripts located on Github\n+\n+*undecided if feature is needed. Contact us if you would like this feature.*\n+\n+### Use Spot Instances\n+\n+*undecided if feature is needed. Contact us if you would like this feature.*\ndiff --git a/docs/source/sagemaker.rst b/docs/source/sagemaker.rst\ndeleted file mode 100644\nindex b889c99b3..000000000\n--- a/docs/source/sagemaker.rst\n+++ /dev/null\n@@ -1,169 +0,0 @@\n-.. \n- Copyright 2021 The HuggingFace Team. All rights reserved.\n-\n- Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n- the License. You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n- an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n- specific language governing permissions and limitations under the License.\n-\n-Amazon SageMaker\n-=======================================================================================================================\n-\n-Hugging Face and Amazon introduced new `Hugging Face Deep Learning Containers (DLCs)\n-<https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers>`_ to\n-make it easier than ever to train Hugging Face Transformer models in `Amazon SageMaker\n-<https://aws.amazon.com/sagemaker/>`_.\n-\n-To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n-\n-\n-\n-Getting Started\n------------------------------------------------------------------------------------------------------------------------\n-\n-Setup & Installation\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-\n-Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not\n-have an AWS account yet learn more `here <https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html>`__.\n-\n-After you have your AWS Account you need to install the ``sagemaker`` sdk for πŸ€— Accelerate with.\n-\n-.. code-block:: \n-\n- pip install \"accelerate[sagemaker]\" --upgrade\n-\n-\n-πŸ€— Accelerate currently uses the πŸ€— DLCs, with ``transformers``, ``datasets`` and ``tokenizers`` pre-installed. πŸ€—\n-Accelerate is not in the DLC yet (will soon be added!) so to use it within Amazon SageMaker you need to create a\n-``requirements.txt`` in the same directory where your training script is located and add it as dependency.\n-\n-.. code-block:: \n-\n- accelerate\n-\n-You should also add any other dependencies you have to this ``requirements.txt``.\n-\n-\n-Configure πŸ€— Accelerate\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with\n-the πŸ€— Accelerate CLI.\n-\n-.. code-block:: \n-\n- accelerate config\n- # In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1\n-\n-\n-πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.\n-\n-.. note::\n- πŸ€— Accelerate is not saving any of your credentials.\n-\n-\n-Prepare a πŸ€— Accelerate fine-tuning script\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-The training script is very similar to a training script you might run outside of SageMaker, but to save your model\n-after training you need to specify either ``/opt/ml/model`` or use ``os.environ[\"SM_MODEL_DIR\"]`` as your save\n-directory. After training, artifacts in this directory are uploaded to S3.\n-\n-\n-.. code-block:: diff\n-\n- - torch.save('/opt/ml/model`)\n- + accelerator.save('/opt/ml/model')\n-\n-\n-.. warning::\n- SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to\n- specify type as bool in your script and provide an explicit True or False value for this hyperparameter. 
`[REF]\n- <https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script>`__.\n-\n-\n-Launch Training\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-You can launch your training with πŸ€— Accelerate CLI with\n-\n-.. code-block:: \n-\n- accelerate launch path_to_script.py --args_to_the_script\n-\n-\n-This will launch your training script using your configuration. The only thing you have to do is provide all the\n-arguments needed by your training script as named arguments.\n-\n-**Examples**\n-\n-.. note::\n- If you run one of the example scripts, don't forget to add ``accelerator.save('/opt/ml/model')`` to it.\n-\n-.. code-block:: \n-\n- accelerate launch ./examples/sagemaker_example.py \n-\n-\n-Outputs:\n-\n-.. code-block:: \n-\n- Configuring Amazon SageMaker environment\n- Converting Arguments to Hyperparameters\n- Creating Estimator\n- 2021-04-08 11:56:50 Starting - Starting the training job...\n- 2021-04-08 11:57:13 Starting - Launching requested ML instancesProfilerReport-1617883008: InProgress\n- .........\n- 2021-04-08 11:58:54 Starting - Preparing the instances for training.........\n- 2021-04-08 12:00:24 Downloading - Downloading input data\n- 2021-04-08 12:00:24 Training - Downloading the training image..................\n- 2021-04-08 12:03:39 Training - Training image download completed. Training in progress..\n- ........\n- epoch 0: {'accuracy': 0.7598039215686274, 'f1': 0.8178438661710037}\n- epoch 1: {'accuracy': 0.8357843137254902, 'f1': 0.882249560632689}\n- epoch 2: {'accuracy': 0.8406862745098039, 'f1': 0.8869565217391304}\n- ........\n- 2021-04-08 12:05:40 Uploading - Uploading generated training model\n- 2021-04-08 12:05:40 Completed - Training job completed\n- Training seconds: 331\n- Billable seconds: 331\n- You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04-08-11-56-47-108/output/model.tar.gz\n-\n-\n-\n-Advanced Features\n------------------------------------------------------------------------------------------------------------------------\n-\n-Distributed Training: Data Parallelism\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-*currently in development, will be supported soon.*\n-\n-Distributed Training: Model Parallelism\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-*currently in development, will be supported soon.*\n-\n-Python packages and dependencies\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-πŸ€— Accelerate currently uses the πŸ€— DLCs, with ``transformers``, ``datasets`` and ``tokenizers`` pre-installed. If you\n-want to use different/other Python packages you can do this by adding them to the ``requirements.txt``. These packages\n-will be installed before your training script is started.\n-\n-Remote scripts: Use scripts located on Github\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-*undecided if feature is needed. Contact us if you would like this feature.*\n-\n-Use Spot Instances\n-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n-\n-*undecided if feature is needed. 
Contact us if you would like this feature.*\ndiff --git a/setup.py b/setup.py\nindex bec3b9805..e1b2d3d1b 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -17,15 +17,7 @@\n \n extras = {}\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n-extras[\"docs\"] = [\n- \"docutils==0.16.0\",\n- \"recommonmark\",\n- \"sphinx==3.2.1\",\n- \"sphinx-markdown-tables\",\n- \"sphinx-rtd-theme==0.4.3\",\n- \"sphinx-copybutton\",\n- \"sphinxext-opengraph==0.4.1\",\n-]\n+extras[\"docs\"] = []\n extras[\"test\"] = [\n \"pytest\",\n \"pytest-xdist\",\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex f77a26a56..54ca364f6 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -5,7 +5,7 @@\n __version__ = \"0.6.0.dev0\"\n \n from .accelerator import Accelerator\n-from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs\n+from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs\n from .launchers import debug_launcher, notebook_launcher\n from .state import DistributedType\n from .utils import DeepSpeedPlugin, synchronize_rng_states\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 694dcc769..fabd3e9fc 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -56,49 +56,48 @@ class Accelerator:\n Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training.\n \n Args:\n- device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n+ device_placement (`bool`, *optional*, defaults to `True`):\n Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,\n etc...).\n- split_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ split_batches (`bool`, *optional*, defaults to `False`):\n Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If\n- :obj:`True` the actual batch size used will be the same on any kind of distributed processes, but it must\n- be a round multiple of the :obj:`num_processes` you are using. If :obj:`False`, actual batch size used will\n- be the one set in your script multiplied by the number of processes.\n- mixed_precision (:obj:`str`, `optional`):\n+ `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a\n+ round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set\n+ in your script multiplied by the number of processes.\n+ mixed_precision (`str`, *optional*):\n Whether or not to use mixed precision training (fp16 or bfloat16). Choose from 'no','fp16','bf16'. Will\n- default to the value in the environment variable :obj:`MIXED_PRECISION`, which will use the default value\n- in the accelerate config of the current system or the flag passed with the :obj:`accelerate.launch`\n- command. 'fp16' requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n- cpu (:obj:`bool`, `optional`):\n- Whether or not to force the script to execute on CPU. Will ignore GPU available if set to :obj:`True` and\n- force the execution on one process only.\n- deepspeed_plugin (:obj:`DeepSpeedPlugin`, `optional`):\n+ default to the value in the environment variable `MIXED_PRECISION`, which will use the default value in the\n+ accelerate config of the current system or the flag passed with the `accelerate.launch` command. 
'fp16'\n+ requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n+ cpu (`bool`, *optional*):\n+ Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force\n+ the execution on one process only.\n+ deepspeed_plugin (`DeepSpeedPlugin`, *optional*):\n Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured\n- directly using `accelerate config`\n- rng_types (list of :obj:`str` or :class:`~accelerate.utils.RNGType`):\n+ directly using *accelerate config*\n+ rng_types (list of `str` or [`~utils.RNGType`]):\n The list of random number generators to synchronize at the beginning of each iteration in your prepared\n dataloaders. Should be one or several of:\n \n- - :obj:`\"torch\"`: the base torch random number generator\n- - :obj:`\"cuda\"`: the CUDA random number generator (GPU only)\n- - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n- - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler in\n- your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n-\n- Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n- >= 1.6.\n- dispatch_batches (:obj:`bool`, `optional`):\n- If set to :obj:`True`, the dataloader prepared by the Accelerator is only iterated through on the main\n- process and then the batches are split and broadcast to each process. Will default to :obj:`True` for\n- :obj:`DataLoader` whose underlying dataset is an :obj:`IterableDataset`, :obj:`False` otherwise.\n- kwargs_handlers (list of kwargs handlers, `optional`)\n- A list of :obj:`KwargHandler` to customize how the objects related to distributed training or mixed\n- precision are created. See :doc:`kwargs` for more information.\n+ - `\"torch\"`: the base torch random number generator\n+ - `\"cuda\"`: the CUDA random number generator (GPU only)\n+ - `\"xla\"`: the XLA random number generator (TPU only)\n+ - `\"generator\"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your\n+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n+\n+ Will default to `[\"torch\"]` for PyTorch versions <=1.5.1 and `[\"generator\"]` for PyTorch versions >= 1.6.\n+ dispatch_batches (`bool`, *optional*):\n+ If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\n+ and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose\n+ underlying dataset is an `IterableDataset`, `False` otherwise.\n+ kwargs_handlers (`List[KwargHandler]`, *optional*)\n+ A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision\n+ are created. 
See [kwargs](kwargs) for more information.\n \n Attributes\n \n- - **device** (:obj:`torch.device`) -- The device to use.\n- - **state** (:class:`~accelerate.AcceleratorState`) -- The distributed setup state.\n+ - **device** (`torch.device`) -- The device to use.\n+ - **state** ([`~state.AcceleratorState`]) -- The distributed setup state.\n \"\"\"\n \n def __init__(\n@@ -276,7 +275,7 @@ def _goes_first(self, is_main):\n \n def print(self, *args, **kwargs):\n \"\"\"\n- Use in replacement of :obj:`print()` to only print once per server.\n+ Use in replacement of `print()` to only print once per server.\n \"\"\"\n if self.is_local_main_process:\n print(*args, **kwargs)\n@@ -296,14 +295,14 @@ def _prepare_one(self, obj):\n \n def prepare(self, *args):\n \"\"\"\n- Prepare all objects passed in :obj:`args` for distributed training and mixed precision, then return them in the\n- same order.\n+ Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same\n+ order.\n \n Accepts the following type of objects:\n \n- - :obj:`torch.utils.data.DataLoader`: PyTorch Dataloader\n- - :obj:`torch.nn.Module`: PyTorch Module\n- - :obj:`torch.optim.Optimizer`: PyTorch Optimizer\n+ - `torch.utils.data.DataLoader`: PyTorch Dataloader\n+ - `torch.nn.Module`: PyTorch Module\n+ - `torch.optim.Optimizer`: PyTorch Optimizer\n \"\"\"\n # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will\n # have parameters disconnected from the model (so no training :-( ).\n@@ -460,7 +459,7 @@ def prepare_optimizer(self, optimizer):\n \n def backward(self, loss, **kwargs):\n \"\"\"\n- Use :obj:`accelerator.backward(loss)` in lieu of :obj:`loss.backward()`.\n+ Use `accelerator.backward(loss)` in lieu of `loss.backward()`.\n \"\"\"\n if self.distributed_type == DistributedType.DEEPSPEED:\n self.deepspeed_engine.backward(loss, **kwargs)\n@@ -474,9 +473,9 @@ def unscale_gradients(self, optimizer=None):\n Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.\n \n Args:\n- optimizer (:obj:`torch.optim.Optimizer` or :obj:`List[torch.optim.Optimizer]`, `optional`):\n+ optimizer (`torch.optim.Optimizer` or `List[torch.optim.Optimizer]`, *optional*):\n The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers\n- that were passed to :meth:`~accelerate.Accelerator.prepare`.\n+ that were passed to [`~Accelerator.prepare`].\n \"\"\"\n if self.state.use_fp16 and self.native_amp:\n if optimizer is None:\n@@ -491,34 +490,33 @@ def unscale_gradients(self, optimizer=None):\n \n def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n \"\"\"\n- Should be used in place of :func:`torch.nn.utils.clip_grad_norm_`.\n+ Should be used in place of `torch.nn.utils.clip_grad_norm_`.\n \"\"\"\n self.unscale_gradients()\n torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\n \n def clip_grad_value_(self, parameters, clip_value):\n \"\"\"\n- Should be used in place of :func:`torch.nn.utils.clip_grad_value_`.\n+ Should be used in place of `torch.nn.utils.clip_grad_value_`.\n \"\"\"\n self.unscale_gradients()\n torch.nn.utils.clip_grad_value_(parameters, clip_value)\n \n def gather(self, tensor):\n \"\"\"\n- Gather the values in `tensor` accross all processes and concatenate them on the first dimension. Useful to\n+ Gather the values in *tensor* accross all processes and concatenate them on the first dimension. 
Useful to\n regroup the predictions from all processes when doing evaluation.\n \n Note:\n This gather happens in all processes.\n \n Args:\n- tensor (:obj:`torch.Tensor`, or a nested tuple/list/dictionary of :obj:`torch.Tensor`):\n+ tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):\n The tensors to gather across all processes.\n \n Returns:\n- :obj:`torch.Tensor`, or a nested tuple/list/dictionary of :obj:`torch.Tensor`: The gathered tensor(s). Note\n- that the first dimension of the result is `num_processes` multiplied by the first dimension of the input\n- tensors.\n+ `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the\n+ first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors.\n \"\"\"\n return gather(tensor)\n \n@@ -528,24 +526,24 @@ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):\n they can safely be gathered.\n \n Args:\n- tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n The data to gather.\n- dim (:obj:`int`, `optional`, defaults to 0):\n+ dim (`int`, *optional*, defaults to 0):\n The dimension on which to pad.\n- pad_index (:obj:`int`, `optional`, defaults to 0):\n+ pad_index (`int`, *optional*, defaults to 0):\n The value with which to pad.\n- pad_first (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ pad_first (`bool`, *optional*, defaults to `False`):\n Whether to pad at the beginning or the end.\n \"\"\"\n return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)\n \n def unwrap_model(self, model):\n \"\"\"\n- Unwraps the :obj:`model` from the additional layer possible added by :meth:`~accelerate.Accelerator.prepare`.\n- Useful before saving the model.\n+ Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving\n+ the model.\n \n Args:\n- model (:obj:`torch.nn.Module`):\n+ model (`torch.nn.Module`):\n The model to unwrap.\n \"\"\"\n return extract_model_from_parallel(model)\n@@ -559,12 +557,12 @@ def wait_for_everyone(self):\n \n def save(self, obj, f):\n \"\"\"\n- Save the object passed to disk once per machine. Use in place of :obj:`torch.save`.\n+ Save the object passed to disk once per machine. Use in place of `torch.save`.\n \n Args:\n obj: The object to save.\n- f (:obj:`str` or :obj:`os.PathLike`):\n- Where to save the content of :obj:`obj`.\n+ f (`str` or `os.PathLike`):\n+ Where to save the content of `obj`.\n \"\"\"\n save(obj, f)\n \n@@ -573,7 +571,7 @@ def save_state(self, output_dir: str):\n Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects.\n \n Args:\n- output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ output_dir (`str` or `os.PathLike`):\n The name of the folder to save all relevant weights and states.\n \"\"\"\n # Check if folder exists\n@@ -593,7 +591,7 @@ def load_state(self, input_dir: str):\n Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.\n \n Args:\n- input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ input_dir (`str` or `os.PathLike`):\n The name of the folder all relevant weights and states were saved in.\n \"\"\"\n # Check if folder exists\n@@ -674,7 +672,11 @@ def register_for_checkpointing(self, *objects):\n These should be utilized when the state is being loaded or saved in the same script. 
It is not designed to be\n used in different scripts\n \n- Note: Every `object` must have a `load_state_dict` and `state_dict` function to be stored.\n+ <Tip>\n+\n+ Every `object` must have a `load_state_dict` and `state_dict` function to be stored.\n+\n+ </Tip>\n \"\"\"\n invalid_objects = []\n for obj in objects:\ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex e1a28d108..0636dffaa 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -41,15 +41,15 @@ def save_accelerator_state(\n Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.\n \n Args:\n- output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ output_dir (`str` or `os.PathLike`):\n The name of the folder to save all relevant weights and states.\n- model_states (:obj:`List[torch.nn.Module]`):\n+ model_states (`List[torch.nn.Module]`):\n A list of model states\n- optimizers (:obj:`List[torch.optim.Optimizer]`):\n+ optimizers (`List[torch.optim.Optimizer]`):\n A list of optimizer instances\n- process_index (:obj:`int`):\n+ process_index (`int`):\n The current process index in the Accelerator state\n- scaler (:obj:`torch.cuda.amp.GradScaler`, `optional`):\n+ scaler (`torch.cuda.amp.GradScaler`, *optional*):\n An optional gradient scaler instance to save\n \"\"\"\n # Model states\n@@ -92,16 +92,16 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=\n Loads states of the models, optimizers, scaler, and RNG generators from a given directory.\n \n Args:\n- input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ input_dir (`str` or `os.PathLike`):\n The name of the folder to load all relevant weights and states.\n- model_stmodelsates (:obj:`List[torch.nn.Module]`):\n+ model_stmodelsates (`List[torch.nn.Module]`):\n A list of model instances\n- optimizers (:obj:`List[torch.optim.Optimizer]`):\n+ optimizers (`List[torch.optim.Optimizer]`):\n A list of optimizer instances\n- process_index (:obj:`int`):\n+ process_index (`int`):\n The current process index in the Accelerator state\n- scaler (:obj:`torch.cuda.amp.GradScaler`, `optional`):\n- An optional `GradScaler` instance to load\n+ scaler (`torch.cuda.amp.GradScaler`, *optional*):\n+ An optional *GradScaler* instance to load\n \"\"\"\n # Model states\n for i, model in enumerate(models):\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex cc46e5577..a827633c0 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -67,34 +67,34 @@\n \n class BatchSamplerShard(BatchSampler):\n \"\"\"\n- Wraps a PyTorch :obj:`BatchSampler` to generate batches for one of the processes only. Instances of this class will\n- always yield a number of batches that is a round multiple of :obj:`num_processes` and that all have the same size.\n- Depending on the value of the :obj:`drop_last` attribute of the batch sampler passed, it will either stop the\n- iteration at the first batch that would be too small / not present on all processes or loop with indices from the\n- beginning.\n+ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. 
Instances of this class will\n+ always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.\n+ Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration\n+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.\n \n Args:\n- batch_sampler (:obj:`torch.utils.data.sampler.BatchSampler`):\n+ batch_sampler (`torch.utils.data.sampler.BatchSampler`):\n The batch sampler to split in several shards.\n- num_processes (:obj:`int`, `optional`, defaults to 1):\n+ num_processes (`int`, *optional*, defaults to 1):\n The number of processes running concurrently.\n- process_index (:obj:`int`, `optional`, defaults to 0):\n+ process_index (`int`, *optional*, defaults to 0):\n The index of the current process.\n- split_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ split_batches (`bool`, *optional*, defaults to `False`):\n Whether the shards should be created by splitting a batch to give a piece of it on each process, or by\n yielding different full batches on each process.\n \n- On two processes with a sampler of :obj:`[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:\n+ On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:\n \n- - the sampler on process 0 to yield :obj:`[0, 1, 2, 3]` and the sampler on process 1 to yield :obj:`[4, 5,\n- 6, 7]` if this argument is set to :obj:`False`.\n- - the sampler on process 0 to yield :obj:`[0, 1]` then :obj:`[4, 5]` and the sampler on process 1 to yield\n- :obj:`[2, 3]` then :obj:`[6, 7]` if this argument is set to :obj:`True`.\n+ - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if\n+ this argument is set to `False`.\n+ - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`\n+ then `[6, 7]` if this argument is set to `True`.\n \n- .. warning::\n+ <Tip warning={true}>\n \n- This does not support :obj:`BatchSampler` with varying batch size yet.\n- \"\"\"\n+ This does not support `BatchSampler` with varying batch size yet.\n+\n+ </Tip>\"\"\"\n \n def __init__(\n self,\n@@ -188,35 +188,35 @@ def _iter_with_no_split(self):\n \n class IterableDatasetShard(IterableDataset):\n \"\"\"\n- Wraps a PyTorch :obj:`IterableDataset` to generate samples for one of the processes only. Instances of this class\n- will always yield a number of samples that is a round multiple of the actual batch size (depending of the value of\n- :obj:`split_batches`, this is either :obj:`batch_size` or :obj:`batch_size x num_processes`). Depending on the\n- value of the :obj:`drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first\n- batch that would be too small or loop with indices from the beginning.\n+ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will\n+ always yield a number of samples that is a round multiple of the actual batch size (depending of the value of\n+ `split_batches`, this is either `batch_size` or `batch_size x num_processes`). 
Depending on the value of the\n+ `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would\n+ be too small or loop with indices from the beginning.\n \n Args:\n- dataset (:obj:`torch.utils.data.dataset.IterableDataset`):\n+ dataset (`torch.utils.data.dataset.IterableDataset`):\n The batch sampler to split in several shards.\n- batch_size (:obj:`int`, `optional`, defaults to 1):\n- The size of the batches per shard (if :obj:`split_batches=False`) or the size of the batches (if\n- :obj:`split_batches=True`).\n- drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ batch_size (`int`, *optional*, defaults to 1):\n+ The size of the batches per shard (if `split_batches=False`) or the size of the batches (if\n+ `split_batches=True`).\n+ drop_last (`bool`, *optional*, defaults to `False`):\n Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the\n beginning.\n- num_processes (:obj:`int`, `optional`, defaults to 1):\n+ num_processes (`int`, *optional*, defaults to 1):\n The number of processes running concurrently.\n- process_index (:obj:`int`, `optional`, defaults to 0):\n+ process_index (`int`, *optional*, defaults to 0):\n The index of the current process.\n- split_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ split_batches (`bool`, *optional*, defaults to `False`):\n Whether the shards should be created by splitting a batch to give a piece of it on each process, or by\n yielding different full batches on each process.\n \n- On two processes with an iterable dataset yielding of :obj:`[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:\n+ On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:\n \n- - the shard on process 0 to yield :obj:`[0, 1, 2, 3]` and the shard on process 1 to yield :obj:`[4, 5, 6,\n- 7]` if this argument is set to :obj:`False`.\n- - the shard on process 0 to yield :obj:`[0, 1, 4, 5]` and the sampler on process 1 to yield :obj:`[2, 3, 6,\n- 7]` if this argument is set to :obj:`True`.\n+ - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this\n+ argument is set to `False`.\n+ - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if\n+ this argument is set to `True`.\n \"\"\"\n \n def __init__(\n@@ -269,25 +269,25 @@ def __iter__(self):\n \n class DataLoaderShard(DataLoader):\n \"\"\"\n- Subclass of a PyTorch :obj:`DataLoader` that will deal with device placement and current distributed setup.\n+ Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.\n \n Args:\n- dataset (:obj:`torch.utils.data.dataset.Dataset`):\n+ dataset (`torch.utils.data.dataset.Dataset`):\n The dataset to use to build this datalaoder.\n- device (:obj:`torch.device`, `optional`):\n+ device (`torch.device`, *optional*):\n If passed, the device to put all batches on.\n- rng_types (list of :obj:`str` or :class:`~accelerate.utils.RNGType`):\n+ rng_types (list of `str` or [`~utils.RNGType`]):\n The list of random number generators to synchronize at the beginning of each iteration. 
Should be one or\n several of:\n \n- - :obj:`\"torch\"`: the base torch random number generator\n- - :obj:`\"cuda\"`: the CUDA random number generator (GPU only)\n- - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n- - :obj:`\"generator\"`: an optional :obj:`torch.Generator`\n- generator (:obj:`torch.Generator`, `optional`):\n+ - `\"torch\"`: the base torch random number generator\n+ - `\"cuda\"`: the CUDA random number generator (GPU only)\n+ - `\"xla\"`: the XLA random number generator (TPU only)\n+ - `\"generator\"`: an optional `torch.Generator`\n+ generator (`torch.Generator`, *optional*):\n A random number generator to keep synchronized across processes.\n kwargs:\n- All other keyword arguments to pass to the regular :obj:`DataLoader` initialization.\n+ All other keyword arguments to pass to the regular `DataLoader` initialization.\n \"\"\"\n \n def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwargs):\n@@ -308,21 +308,21 @@ def __iter__(self):\n \n class DataLoaderDispatcher(DataLoader):\n \"\"\"\n- Subclass of a PyTorch :obj:`DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each\n+ Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each\n process their part of the batch.\n \n Args:\n- split_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n- Whether the resulting :obj:`DataLoader` should split the batches of the original data loader across devices\n- or yield full batches (in which case it will yield batches starting at the :obj:`process_index`-th and\n- advancing of :obj:`num_processes` batches at each iteration).\n+ split_batches (`bool`, *optional*, defaults to `False`):\n+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or\n+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of\n+ `num_processes` batches at each iteration).\n \n- Another way to see this is that the observed batch size will be the same as the initial :obj:`dataloader`\n- if this option is set to :obj:`True`, the batch size of the initial :obj:`dataloader` multiplied by\n- :obj:`num_processes` otherwise.\n+ Another way to see this is that the observed batch size will be the same as the initial `dataloader` if\n+ this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`\n+ otherwise.\n \n- Setting this option to :obj:`True` requires that the batch size of the :obj:`dataloader` is a round\n- multiple of :obj:`batch_size`.\n+ Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of\n+ `batch_size`.\n \"\"\"\n \n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n@@ -425,58 +425,58 @@ def prepare_data_loader(\n dispatch_batches: Optional[bool] = None,\n ) -> DataLoader:\n \"\"\"\n- Wraps a PyTorch :obj:`DataLoader` to generate batches for one of the processes only.\n+ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.\n \n- Depending on the value of the :obj:`drop_last` attribute of the :obj:`dataloader` passed, it will either stop the\n- iteration at the first batch that would be too small / not present on all processes or loop with indices from the\n- beginning.\n+ Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration\n+ at the first batch that would be too small / not present on 
all processes or loop with indices from the beginning.\n \n Args:\n- dataloader (:obj:`torch.utils.data.dataloader.DataLoader`):\n+ dataloader (`torch.utils.data.dataloader.DataLoader`):\n The data loader to split across several devices.\n- device (:obj:`torch.device`):\n- The target device for the returned :obj:`DataLoader`.\n- num_processes (:obj:`int`, `optional`):\n+ device (`torch.device`):\n+ The target device for the returned `DataLoader`.\n+ num_processes (`int`, *optional*):\n The number of processes running concurrently. Will default to the value given by\n- :class:`~accelerate.AcceleratorState`.\n- process_index (:obj:`int`, `optional`):\n- The index of the current process. Will default to the value given by :class:`~accelerate.AcceleratorState`.\n- split_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n- Whether the resulting :obj:`DataLoader` should split the batches of the original data loader across devices\n- or yield full batches (in which case it will yield batches starting at the :obj:`process_index`-th and\n- advancing of :obj:`num_processes` batches at each iteration).\n-\n- Another way to see this is that the observed batch size will be the same as the initial :obj:`dataloader`\n- if this option is set to :obj:`True`, the batch size of the initial :obj:`dataloader` multiplied by\n- :obj:`num_processes` otherwise.\n-\n- Setting this option to :obj:`True` requires that the batch size of the :obj:`dataloader` is a round\n- multiple of :obj:`batch_size`.\n- put_on_device (:obj:`bool`, `optional`, defaults to :obj:`False`):\n- Whether or not to put the batches on :obj:`device` (only works if the batches are nested list, tuples or\n+ [`~state.AcceleratorState`].\n+ process_index (`int`, *optional*):\n+ The index of the current process. Will default to the value given by [`~state.AcceleratorState`].\n+ split_batches (`bool`, *optional*, defaults to `False`):\n+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or\n+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of\n+ `num_processes` batches at each iteration).\n+\n+ Another way to see this is that the observed batch size will be the same as the initial `dataloader` if\n+ this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`\n+ otherwise.\n+\n+ Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of\n+ `batch_size`.\n+ put_on_device (`bool`, *optional*, defaults to `False`):\n+ Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or\n dictionaries of tensors).\n- rng_types (list of :obj:`str` or :class:`~accelerate.utils.RNGType`):\n+ rng_types (list of `str` or [`~utils.RNGType`]):\n The list of random number generators to synchronize at the beginning of each iteration. 
Should be one or\n several of:\n \n- - :obj:`\"torch\"`: the base torch random number generator\n- - :obj:`\"cuda\"`: the CUDA random number generator (GPU only)\n- - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n- - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler in\n- your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n+ - `\"torch\"`: the base torch random number generator\n+ - `\"cuda\"`: the CUDA random number generator (GPU only)\n+ - `\"xla\"`: the XLA random number generator (TPU only)\n+ - `\"generator\"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your\n+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n \n- dispatch_batches (:obj:`bool`, `optional`):\n- If set to :obj:`True`, the datalaoder prepared is only iterated through on the main process and then the\n- batches are split and broadcast to each process. Will default to :obj:`True` when the underlying dataset is\n- an :obj:`IterableDataset`, :obj:`False` otherwise.\n+ dispatch_batches (`bool`, *optional*):\n+ If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\n+ are split and broadcast to each process. Will default to `True` when the underlying dataset is an\n+ `IterableDataset`, `False` otherwise.\n \n Returns:\n- :obj:`torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n+ `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n \n- .. warning::\n+ <Tip warning={true}>\n \n- This does not support :obj:`BatchSampler` with varying batch size yet.\n- \"\"\"\n+ This does not support `BatchSampler` with varying batch size yet.\n+\n+ </Tip>\"\"\"\n if dispatch_batches is None:\n if version.parse(torch.__version__) < version.parse(\"1.8.0\") or not put_on_device:\n dispatch_batches = False\ndiff --git a/src/accelerate/deepspeed_utils.py b/src/accelerate/deepspeed_utils.py\nindex 07450a87e..5f261f2e3 100644\n--- a/src/accelerate/deepspeed_utils.py\n+++ b/src/accelerate/deepspeed_utils.py\n@@ -71,7 +71,7 @@ class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):\n Internal wrapper around a deepspeed optimizer.\n \n Args:\n- optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n+ optimizer (`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n \"\"\"\n \ndiff --git a/src/accelerate/kwargs_handlers.py b/src/accelerate/kwargs_handlers.py\nindex 51760efdc..cf5985644 100644\n--- a/src/accelerate/kwargs_handlers.py\n+++ b/src/accelerate/kwargs_handlers.py\n@@ -20,7 +20,7 @@\n \n class KwargsHandler:\n \"\"\"\n- Internal mixin that implements a :obj:`to_kwargs()` method for a dataclass.\n+ Internal mixin that implements a `to_kwargs()` method for a dataclass.\n \"\"\"\n \n def to_dict(self):\n@@ -38,15 +38,16 @@ def to_kwargs(self):\n @dataclass\n class DistributedDataParallelKwargs(KwargsHandler):\n \"\"\"\n- Use this object in your :class:`~accelerate.Accelerator` to customize how your model is wrapped in a\n- :obj:`torch.nn.parallel.DistributedDataParallel`. 
Please refer to the documentation of this `wrapper\n- <https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html>`__ for more information\n- on each argument.\n+ Use this object in your [`Accelerator`] to customize how your model is wrapped in a\n+ `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this\n+ [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more\n+ information on each argument.\n \n- .. warning::\n+ <Tip warning={true}>\n \n- :obj:`gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.\n- \"\"\"\n+ `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.\n+\n+ </Tip>\"\"\"\n \n dim: int = 0\n broadcast_buffers: bool = True\n@@ -59,14 +60,15 @@ class DistributedDataParallelKwargs(KwargsHandler):\n @dataclass\n class GradScalerKwargs(KwargsHandler):\n \"\"\"\n- Use this object in your :class:`~accelerate.Accelerator` to customize the behavior of mixed precision, specifically\n- how the :obj:`torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this `scaler\n- <https://pytorch.org/docs/stable/amp.html?highlight=gradscaler>`__ for more information on each argument.\n+ Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the\n+ `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this\n+ [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.\n \n- .. warning::\n+ <Tip warning={true}>\n \n- :obj:`GradScaler` is only available in PyTorch 1.5.0 and later versions.\n- \"\"\"\n+ `GradScaler` is only available in PyTorch 1.5.0 and later versions.\n+\n+ </Tip>\"\"\"\n \n init_scale: float = 65536.0\n growth_factor: float = 2.0\n@@ -78,10 +80,10 @@ class GradScalerKwargs(KwargsHandler):\n @dataclass\n class InitProcessGroupKwargs(KwargsHandler):\n \"\"\"\n- Use this object in your :class:`~accelerate.Accelerator` to customize the initialization of the distributed\n- processes. Please refer to the documentation of this `method\n- <https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group>`__ for more information on\n- each argument.\n+ Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer\n+ to the documentation of this\n+ [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more\n+ information on each argument.\n \"\"\"\n \n init_method: Optional[str] = None\ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex a47eb9a4c..48c4404dd 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -31,17 +31,17 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n multiple cores for instance).\n \n Args:\n- function (:obj:`Callable`):\n+ function (`Callable`):\n The training function to execute. If it accepts arguments, the first argument should be the index of the\n process run.\n- args (:obj:`Tuple`):\n- Tuple of arguments to pass to the function (it will receive :obj:`*args`).\n- num_processes (:obj:`int`, `optional`):\n+ args (`Tuple`):\n+ Tuple of arguments to pass to the function (it will receive `*args`).\n+ num_processes (`int`, *optional*):\n The number of processes to use for training. 
Will default to 8 in Colab/Kaggle if a TPU is available, to\n the number of GPUs available otherwise.\n- mixed_precision (:obj:`str`, `optional`, defaults to :obj:`no`):\n- If :obj:`fp16` or :obj:`bf16`, will use mixed precision training on multi-GPU.\n- use_port (:obj:`str`, `optional`, defaults to :obj:`\"29500\"`):\n+ mixed_precision (`str`, *optional*, defaults to `\"no\"`):\n+ If `fp16` or `bf16`, will use mixed precision training on multi-GPU.\n+ use_port (`str`, *optional*, defaults to `\"29500\"`):\n The port to use to communicate between processes when launching a multi-GPU training.\n \"\"\"\n # Are we in a google colab or a Kaggle Kernel?\n@@ -140,18 +140,19 @@ def debug_launcher(function, args=(), num_processes=2):\n \"\"\"\n Launches a training function using several processes on CPU for debugging purposes.\n \n- .. warning::\n+ <Tip warning={true}>\n \n- This function is provided for internal testing and debugging, but it's not intended for real trainings. It will\n- only use the CPU.\n+ This function is provided for internal testing and debugging, but it's not intended for real trainings. It will\n+ only use the CPU.\n \n+ </Tip>\n \n Args:\n- function (:obj:`Callable`):\n+ function (`Callable`):\n The training function to execute.\n- args (:obj:`Tuple`):\n- Tuple of arguments to pass to the function (it will receive :obj:`*args`).\n- num_processes (:obj:`int`, *optional*, defaults to 2):\n+ args (`Tuple`):\n+ Tuple of arguments to pass to the function (it will receive `*args`).\n+ num_processes (`int`, *optional*, defaults to 2):\n The number of processes to use for training.\n \"\"\"\n if version.parse(torch.__version__) < version.parse(\"1.5.0\"):\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 52217ba6a..4360c0c09 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -41,12 +41,12 @@ class AcceleratedOptimizer(torch.optim.Optimizer):\n Internal wrapper around a torch optimizer.\n \n Args:\n- optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n+ optimizer (`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n- device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n+ device_placement (`bool`, *optional*, defaults to `True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n- :obj:`optimizer` on the right device.\n- scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n+ `optimizer` on the right device.\n+ scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n \ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex e8c910e19..0e0c21e1a 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -127,19 +127,19 @@ class ComputeEnvironment(str, Enum):\n # Inspired by Alex Martelli's 'Borg'.\n class AcceleratorState:\n \"\"\"\n- This is a variation of a `singleton class <https://en.wikipedia.org/wiki/Singleton_pattern>`__ in the sense that\n- all instance of :obj:`AcceleratorState` share the same state, which is initialized on the first instantiation.\n-\n- Attributes\n-\n- - **device** (:obj:`torch.device`) -- The device to use.\n- - **distributed_type** (:obj:`~accelerate.state.DistributedType`) -- The type of distributed environment\n- currently in use.\n- - **num_processes** (:obj:`int`) -- The number of processes currently launched in parallel.\n- - **process_index** (:obj:`int`) -- The index of the current process.\n- - **local_process_index** (:obj:`int`) -- The index of the current process on the current server.\n- - **mixed_precision** (:obj:`str`) -- Whether or not the current script will use mixed precision. If you are\n- using mixed precision, define if you want to use FP16 or BF16 (bfloat16) as the floating point.\n+ This is a variation of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all\n+ instance of `AcceleratorState` share the same state, which is initialized on the first instantiation.\n+\n+ Attributes:\n+\n+ - **device** (`torch.device`) -- The device to use.\n+ - **distributed_type** (`~accelerate.state.DistributedType`) -- The type of distributed environment currently\n+ in use.\n+ - **num_processes** (`int`) -- The number of processes currently launched in parallel.\n+ - **process_index** (`int`) -- The index of the current process.\n+ - **local_process_index** (`int`) -- The index of the current process on the current server.\n+ - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision. 
If you are using\n+ mixed precision, define if you want to use FP16 or BF16 (bfloat16) as the floating point.\n \"\"\"\n \n _shared_state = {}\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 4b20cc1c3..69e74f2a5 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -65,11 +65,12 @@ class TensorInformation:\n \n def set_seed(seed: int, device_specific: bool = False):\n \"\"\"\n- Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.\n+ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.\n \n Args:\n- seed (:obj:`int`): The seed to set.\n- device_specific (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to differ the seed on each device slightly with `self.process_index`\n+ seed (`int`): The seed to set.\n+ device_specific (`bool`, *optional*, defaults to `False`):\n+ Whether to differ the seed on each device slightly with `self.process_index`.\n \"\"\"\n if device_specific:\n seed += AcceleratorState().process_index\n@@ -146,23 +147,22 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.\n \n Args:\n- func (:obj:`callable`):\n+ func (`callable`):\n The function to recursively apply.\n- data (nested list/tuple/dictionary of :obj:`main_type`):\n- The data on which to apply :obj:`func`\n+ data (nested list/tuple/dictionary of `main_type`):\n+ The data on which to apply `func`\n *args:\n- Positional arguments that will be passed to :obj:`func` when applied on the unpacked data.\n- main_type (:obj:`type`, `optional`, defaults to :obj:`torch.Tensor`):\n- The base type of the objects to which apply :obj:`func`.\n- error_on_other_type (:obj:`bool`, `optional`, defaults to :obj:`False`):\n- Whether to return an error or not if after unpacking :obj:`data`, we get on an object that is not of type\n- :obj:`main_type`. If :obj:`False`, the function will leave objects of types different than :obj:`main_type`\n- unchanged.\n+ Positional arguments that will be passed to `func` when applied on the unpacked data.\n+ main_type (`type`, *optional*, defaults to `torch.Tensor`):\n+ The base type of the objects to which apply `func`.\n+ error_on_other_type (`bool`, *optional*, defaults to `False`):\n+ Whether to return an error or not if after unpacking `data`, we get on an object that is not of type\n+ `main_type`. 
If `False`, the function will leave objects of types different than `main_type` unchanged.\n **kwargs:\n- Keyword arguments that will be passed to :obj:`func` when applied on the unpacked data.\n+ Keyword arguments that will be passed to `func` when applied on the unpacked data.\n \n Returns:\n- The same data structure as :obj:`data` with :obj:`func` applied to every object of type :obj:`main_type`.\n+ The same data structure as `data` with `func` applied to every object of type `main_type`.\n \"\"\"\n if isinstance(data, (tuple, list)):\n return honor_type(\n@@ -198,13 +198,13 @@ def send_to_device(tensor, device):\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.\n \n Args:\n- tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n The data to send to a given device.\n- device (:obj:`torch.device`):\n- The device to send the data to\n+ device (`torch.device`):\n+ The device to send the data to.\n \n Returns:\n- The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n+ The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n \n def _send_to_device(t, device):\n@@ -221,11 +221,11 @@ def get_data_structure(data):\n Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.\n \n Args:\n- data (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ data (nested list/tuple/dictionary of `torch.Tensor`):\n The data to send to analyze.\n \n Returns:\n- The same data structure as :obj:`data` with :class:`~accelerate.utils.TensorInformation` instead of tensors.\n+ The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.\n \"\"\"\n \n def _get_data_structure(tensor):\n@@ -236,11 +236,10 @@ def _get_data_structure(tensor):\n \n def initialize_tensors(data_structure):\n \"\"\"\n- Recursively initializes tensors from a nested list/tuple/dictionary of\n- :class:`~accelerate.utils.TensorInformation`.\n+ Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].\n \n Returns:\n- The same data structure as :obj:`data` with tensors instead of :class:`~accelerate.utils.TensorInformation`.\n+ The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].\n \"\"\"\n \n def _initialize_tensor(tensor_info):\n@@ -254,11 +253,11 @@ def convert_to_fp32(tensor):\n Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.\n \n Args:\n- tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n The data to convert from FP16/BF16 to FP32.\n \n Returns:\n- The same data structure as :obj:`tensor` with all tensors that were in FP16/BF16 precision converted to FP32.\n+ The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.\n \"\"\"\n \n def _convert_to_fp32(tensor):\n@@ -279,11 +278,11 @@ def convert_outputs_to_fp32(model_forward):\n precision will be convert back to FP32.\n \n Args:\n- model_forward (:obj:`Callable`):\n+ model_forward (`Callable`):\n The function which outputs we want to treat.\n \n Returns:\n- The same function as :obj:`model_forward` but with converted outputs.\n+ The same function as `model_forward` but with converted outputs.\n \"\"\"\n \n def convert_outputs(*args, **kwargs):\n@@ -298,10 +297,10 @@ def 
extract_model_from_parallel(model):\n Extract a model from its distributed containers.\n \n Args:\n- model (:obj:`torch.nn.Module`): The model to extract.\n+ model (`torch.nn.Module`): The model to extract.\n \n Returns:\n- :obj:`torch.nn.Module`: The extracted model.\n+ `torch.nn.Module`: The extracted model.\n \"\"\"\n options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n if is_deepspeed_available():\n@@ -343,11 +342,11 @@ def gather(tensor):\n Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n \n Args:\n- tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n The data to gather.\n \n Returns:\n- The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n+ The same data structure as `tensor` with all tensors sent to the proper device.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n@@ -380,7 +379,7 @@ def gather_object(object: Any):\n The data to gather.\n \n Returns:\n- The same data structure as :obj:`object` with all the objects sent to every device.\n+ The same data structure as `object` with all the objects sent to every device.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n raise NotImplementedError(\"gather objects in TPU is not supported\")\n@@ -413,13 +412,13 @@ def broadcast(tensor, from_process: int = 0):\n Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.\n \n Args:\n- tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n The data to gather.\n- from_process (:obj:`int`, `optional`, defaults to 0):\n+ from_process (`int`, *optional*, defaults to 0):\n The process from which to send the data\n \n Returns:\n- The same data structure as :obj:`tensor` with all tensors broadcasted to the proper device.\n+ The same data structure as `tensor` with all tensors broadcasted to the proper device.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_broadcast(tensor, src=from_process, name=\"accelerate.utils.broadcast\")\n@@ -438,7 +437,7 @@ def broadcast_object_list(object_list, from_process: int = 0):\n Args:\n object_list (list of picklable objects):\n The list of objects to broadcast. 
This list will be modified inplace.\n- from_process (:obj:`int`, `optional`, defaults to 0):\n+ from_process (`int`, *optional*, defaults to 0):\n The process from which to send the data.\n \n Returns:\n@@ -459,13 +458,13 @@ def slice_tensors(data, tensor_slice):\n Recursively takes a slice in a nested list/tuple/dictionary of tensors.\n \n Args:\n- data (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ data (nested list/tuple/dictionary of `torch.Tensor`):\n The data to slice.\n- tensor_slice (:obj:`slice`):\n+ tensor_slice (`slice`):\n The slice to take.\n \n Returns:\n- The same data structure as :obj:`data` with all the tensors slices.\n+ The same data structure as `data` with all the tensors slices.\n \"\"\"\n \n def _slice_tensor(tensor, tensor_slice):\n@@ -479,10 +478,10 @@ def find_batch_size(data):\n Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.\n \n Args:\n- data (nested list/tuple/dictionary of :obj:`torch.Tensor`): The data from which to find the batch size.\n+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.\n \n Returns:\n- :obj:`int`: The batch size.\n+ `int`: The batch size.\n \"\"\"\n if isinstance(data, (tuple, list)):\n return find_batch_size(data[0])\n@@ -499,13 +498,13 @@ def concatenate(data, dim=0):\n Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.\n \n Args:\n- data (nested list/tuple/dictionary of lists of tensors :obj:`torch.Tensor`):\n+ data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):\n The data to concatenate.\n- dim (:obj:`int`, `optional`, defaults to 0):\n+ dim (`int`, *optional*, defaults to 0):\n The dimension on which to concatenate.\n \n Returns:\n- The same data structure as :obj:`data` with all the tensors concatenated.\n+ The same data structure as `data` with all the tensors concatenated.\n \"\"\"\n if isinstance(data[0], (tuple, list)):\n return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))\n@@ -522,13 +521,13 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n can safely be gathered.\n \n Args:\n- tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ tensor (nested list/tuple/dictionary of `torch.Tensor`):\n The data to gather.\n- dim (:obj:`int`, `optional`, defaults to 0):\n+ dim (`int`, *optional*, defaults to 0):\n The dimension on which to pad.\n- pad_index (:obj:`int`, `optional`, defaults to 0):\n+ pad_index (`int`, *optional*, defaults to 0):\n The value with which to pad.\n- pad_first (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ pad_first (`bool`, *optional*, defaults to `False`):\n Whether to pad at the beginning or the end.\n \"\"\"\n \n@@ -566,9 +565,11 @@ def wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n \n- Warning::\n+ <Tip warning={true}>\n \n- Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n+ Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n+\n+ </Tip>\n \"\"\"\n if (\n AcceleratorState().distributed_type == DistributedType.MULTI_GPU\n@@ -582,7 +583,7 @@ def wait_for_everyone():\n \n def save(obj, f):\n \"\"\"\n- Save the data to disk. Use in place of :obj:`torch.save()`.\n+ Save the data to disk. 
Use in place of `torch.save()`.\n \n Args:\n obj: The data to save\n@@ -599,11 +600,11 @@ class PrepareForLaunch:\n Prepare a function that will launched in a distributed setup.\n \n Args:\n- launcher (:obj:`Callable`):\n+ launcher (`Callable`):\n The function to launch.\n- distributed_type (:class:`~accelerate.state.DistributedType`):\n+ distributed_type ([`~state.DistributedType`]):\n The distributed type to prepare for.\n- debug (:obj:`bool`, *optional*, defaults to :obj:`False`):\n+ debug (`bool`, *optional*, defaults to `False`):\n Whether or not this is a debug launch.\n \"\"\"\n \n@@ -680,9 +681,9 @@ def __post_init__(self):\n @contextmanager\n def patch_environment(**kwargs):\n \"\"\"\n- A context manager that will add each keyword argument passed to ``os.environ`` and remove them when exiting.\n+ A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.\n \n- Will convert the values in :obj:`kwargs` to strings and upper-case all the keys.\n+ Will convert the values in `kwargs` to strings and upper-case all the keys.\n \"\"\"\n for key, value in kwargs.items():\n os.environ[key.upper()] = str(value)\n@@ -695,7 +696,7 @@ def patch_environment(**kwargs):\n \n def get_pretty_name(obj):\n \"\"\"\n- Gets a pretty name from ``obj``\n+ Gets a pretty name from `obj`.\n \"\"\"\n if not hasattr(obj, \"__qualname__\") and not hasattr(obj, \"__name__\"):\n obj = getattr(obj, \"__class__\", obj)\ndiff --git a/utils/style_doc.py b/utils/style_doc.py\nindex 0e363eaab..2a325805f 100644\n--- a/utils/style_doc.py\n+++ b/utils/style_doc.py\n@@ -1,6 +1,5 @@\n # coding=utf-8\n-\n-# Copyright 2021 The HuggingFace Inc. team.\n+# Copyright 2020 The HuggingFace Inc. team.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -13,78 +12,209 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n-\n \"\"\"Style utils for the .rst and the docstrings.\"\"\"\n \n import argparse\n import os\n import re\n import warnings\n-from enum import Enum\n-\n-\n-# Special blocks where the inside should be formatted.\n-TEXTUAL_BLOCKS = [\"note\", \"warning\"]\n-# List of acceptable characters for titles and sections underline.\n-TITLE_SPECIAL_CHARS = \"\"\"= - ` : ' \" ~ ^ _ * + # < >\"\"\".split(\" \")\n-# Special words for docstrings (s? means the s is optional)\n-DOC_SPECIAL_WORD = [\n- \"Args?\",\n- \"Params?\",\n- \"Parameters?\",\n- \"Arguments?\",\n- \"Examples?\",\n- \"Usage\",\n- \"Returns?\",\n- \"Raises?\",\n- \"Attributes?\",\n-]\n+\n+import black\n+\n+\n+BLACK_AVOID_PATTERNS = {}\n+\n \n # Regexes\n-# Matches any declaration of textual block, like `.. note::`. (ignore case to avoid writing all versions in the list)\n-_re_textual_blocks = re.compile(r\"^\\s*\\.\\.\\s+(\" + \"|\".join(TEXTUAL_BLOCKS) + r\")\\s*::\\s*$\", re.IGNORECASE)\n-# Matches list introduction in rst.\n+# Re pattern that catches list introduction (with potential indent)\n _re_list = re.compile(r\"^(\\s*-\\s+|\\s*\\*\\s+|\\s*\\d+\\.\\s+)\")\n-# Matches the indent in a line.\n-_re_indent = re.compile(r\"^(\\s*)\\S\")\n-# Matches a table declaration in rst.\n-_re_table = re.compile(r\"(\\+-+)+\\+\\s*$\")\n-# Matches a code block in rst `:: `.\n-_re_code_block = re.compile(r\"^\\s*::\\s*$\")\n-# Matches any block of the form `.. something::` or `.. 
something:: bla`.\n-_re_ignore = re.compile(r\"^\\s*\\.\\.\\s+(.*?)\\s*::\\s*\\S*\\s*$\")\n-# Matches comment introduction in rst.\n-_re_comment = re.compile(r\"\\s*\\.\\.\\s*$\")\n+# Re pattern that catches code block introduction (with potentinal indent)\n+_re_code = re.compile(r\"^(\\s*)```(.*)$\")\n+# Re pattern that catches rst args blocks of the form `Parameters:`.\n+_re_args = re.compile(\"^\\s*(Args?|Arguments?|Params?|Parameters?):\\s*$\")\n+# Re pattern that catches return blocks of the form `Return:`.\n+_re_returns = re.compile(\"^\\s*Returns?:\\s*$\")\n # Matches the special tag to ignore some paragraphs.\n _re_doc_ignore = re.compile(r\"(\\.\\.|#)\\s*docstyle-ignore\")\n-# Matches the example introduction in docstrings.\n-_re_example = re.compile(r\"::\\s*$\")\n-# Matches the parameters introduction in docstrings.\n-_re_arg_def = re.compile(r\"^\\s*(Args?|Parameters?|Params|Arguments?|Environment|Attributes?)\\s*:\\s*$\")\n-# Matches the return introduction in docstrings.\n-_re_return = re.compile(r\"^\\s*(Returns?|Raises?|Note)\\s*:\\s*$\")\n-# Matches any doc special word.\n-_re_any_doc_special_word = re.compile(r\"^\\s*(\" + \"|\".join(DOC_SPECIAL_WORD) + r\")::?\\s*$\")\n+# Re pattern that matches <Tip>, </Tip> and <Tip warning={true}> blocks.\n+_re_tip = re.compile(\"^\\s*</?Tip(>|\\s+warning={true}>)\\s*$\")\n+\n+DOCTEST_PROMPTS = [\">>>\", \"...\"]\n+\n+\n+def is_empty_line(line):\n+ return len(line) == 0 or line.isspace()\n+\n+\n+def find_indent(line):\n+ \"\"\"\n+ Returns the number of spaces that start a line indent.\n+ \"\"\"\n+ search = re.search(\"^(\\s*)(?:\\S|$)\", line)\n+ if search is None:\n+ return 0\n+ return len(search.groups()[0])\n+\n+\n+def parse_code_example(code_lines):\n+ \"\"\"\n+ Parses a code example\n+\n+ Args:\n+ code_lines (`List[str]`): The code lines to parse.\n+ max_len (`int`): The maximum lengh per line.\n+\n+ Returns:\n+ (List[`str`], List[`str`]): The list of code samples and the list of outputs.\n+ \"\"\"\n+ has_doctest = code_lines[0][:3] in DOCTEST_PROMPTS\n+\n+ code_samples = []\n+ outputs = []\n+ in_code = True\n+ current_bit = []\n+\n+ for line in code_lines:\n+ if in_code and has_doctest and not is_empty_line(line) and line[:3] not in DOCTEST_PROMPTS:\n+ code_sample = \"\\n\".join(current_bit)\n+ code_samples.append(code_sample.strip())\n+ in_code = False\n+ current_bit = []\n+ elif not in_code and line[:3] in DOCTEST_PROMPTS:\n+ output = \"\\n\".join(current_bit)\n+ outputs.append(output.strip())\n+ in_code = True\n+ current_bit = []\n+\n+ # Add the line without doctest prompt\n+ if line[:3] in DOCTEST_PROMPTS:\n+ line = line[4:]\n+ current_bit.append(line)\n+\n+ # Add last sample\n+ if in_code:\n+ code_sample = \"\\n\".join(current_bit)\n+ code_samples.append(code_sample.strip())\n+ else:\n+ output = \"\\n\".join(current_bit)\n+ outputs.append(output.strip())\n+\n+ return code_samples, outputs\n \n \n-class SpecialBlock(Enum):\n- NOT_SPECIAL = 0\n- NO_STYLE = 1\n- ARG_LIST = 2\n+def format_code_example(code: str, max_len: int, in_docstring: bool = False):\n+ \"\"\"\n+ Format a code example using black. 
Will take into account the doctest syntax as well as any initial indentation in\n+ the code provided.\n \n+ Args:\n+ code (`str`): The code example to format.\n+ max_len (`int`): The maximum lengh per line.\n+ in_docstring (`bool`, *optional*, defaults to `False`): Whether or not the code example is inside a docstring.\n \n-def split_text_in_lines(text, max_len, prefix=\"\", min_indent=None):\n+ Returns:\n+ `str`: The formatted code.\n+ \"\"\"\n+ code_lines = code.split(\"\\n\")\n+\n+ # Find initial indent\n+ idx = 0\n+ while idx < len(code_lines) and is_empty_line(code_lines[idx]):\n+ idx += 1\n+ if idx >= len(code_lines):\n+ return \"\", \"\"\n+ indent = find_indent(code_lines[idx])\n+\n+ # Remove the initial indent for now, we will had it back after styling.\n+ # Note that l[indent:] works for empty lines\n+ code_lines = [l[indent:] for l in code_lines[idx:]]\n+ has_doctest = code_lines[0][:3] in DOCTEST_PROMPTS\n+\n+ code_samples, outputs = parse_code_example(code_lines)\n+\n+ # Let's blackify the code! We put everything in one big text to go faster.\n+ delimiter = \"\\n\\n### New code sample ###\\n\"\n+ full_code = delimiter.join(code_samples)\n+ line_length = max_len - indent\n+ if has_doctest:\n+ line_length -= 4\n+\n+ for k, v in BLACK_AVOID_PATTERNS.items():\n+ full_code = full_code.replace(k, v)\n+ try:\n+ mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=line_length)\n+ formatted_code = black.format_str(full_code, mode=mode)\n+ error = \"\"\n+ except Exception as e:\n+ formatted_code = full_code\n+ error = f\"Code sample:\\n{full_code}\\n\\nError message:\\n{e}\"\n+\n+ # Let's get back the formatted code samples\n+ for k, v in BLACK_AVOID_PATTERNS.items():\n+ formatted_code = formatted_code.replace(v, k)\n+ # Triple quotes will mess docstrings.\n+ if in_docstring:\n+ formatted_code = formatted_code.replace('\"\"\"', \"'''\")\n+\n+ code_samples = formatted_code.split(delimiter)\n+ # We can have one output less than code samples\n+ if len(outputs) == len(code_samples) - 1:\n+ outputs.append(\"\")\n+\n+ formatted_lines = []\n+ for code_sample, output in zip(code_samples, outputs):\n+ # black may have added some new lines, we remove them\n+ code_sample = code_sample.strip()\n+ in_triple_quotes = False\n+ in_decorator = False\n+ for line in code_sample.strip().split(\"\\n\"):\n+ if has_doctest and not is_empty_line(line):\n+ prefix = (\n+ \"... 
\"\n+ if line.startswith(\" \") or line in [\")\", \"]\", \"}\"] or in_triple_quotes or in_decorator\n+ else \">>> \"\n+ )\n+ else:\n+ prefix = \"\"\n+ indent_str = \"\" if is_empty_line(line) else (\" \" * indent)\n+ formatted_lines.append(indent_str + prefix + line)\n+\n+ if '\"\"\"' in line:\n+ in_triple_quotes = not in_triple_quotes\n+ if line.startswith(\" \"):\n+ in_decorator = False\n+ if line.startswith(\"@\"):\n+ in_decorator = True\n+\n+ formatted_lines.extend([\" \" * indent + line for line in output.split(\"\\n\")])\n+ if not output.endswith(\"===PT-TF-SPLIT===\"):\n+ formatted_lines.append(\"\")\n+\n+ result = \"\\n\".join(formatted_lines)\n+ return result.rstrip(), error\n+\n+\n+def format_text(text, max_len, prefix=\"\", min_indent=None):\n \"\"\"\n- Split `text` in the biggest lines possible with the constraint of `max_len` using `prefix` on the first line and\n- then indenting with the same length as `prefix`.\n+ Format a text in the biggest lines possible with the constraint of a maximum length and an indentation.\n+\n+ Args:\n+ text (`str`): The text to format\n+ max_len (`int`): The maximum length per line to use\n+ prefix (`str`, *optional*, defaults to `\"\"`): A prefix that will be added to the text.\n+ The prefix doesn't count toward the indent (like a - introducing a list).\n+ min_indent (`int`, *optional*): The minimum indent of the text.\n+ If not set, will default to the length of the `prefix`.\n+\n+ Returns:\n+ `str`: The formatted text.\n \"\"\"\n text = re.sub(r\"\\s+\", \" \", text)\n- indent = \" \" * len(prefix)\n if min_indent is not None:\n- if len(indent) < len(min_indent):\n- indent = min_indent\n- if len(prefix) < len(min_indent):\n- prefix = \" \" * (len(min_indent) - len(prefix)) + prefix\n+ if len(prefix) < min_indent:\n+ prefix = \" \" * (min_indent - len(prefix)) + prefix\n+\n+ indent = \" \" * len(prefix)\n new_lines = []\n words = text.split(\" \")\n current_line = f\"{prefix}{words[0]}\"\n@@ -99,379 +229,189 @@ def split_text_in_lines(text, max_len, prefix=\"\", min_indent=None):\n return \"\\n\".join(new_lines)\n \n \n-def get_indent(line):\n- \"\"\"Get the indentation of `line`.\"\"\"\n- indent_search = _re_indent.search(line)\n- return indent_search.groups()[0] if indent_search is not None else \"\"\n-\n-\n-class CodeStyler:\n- \"\"\"A generic class to style .rst files.\"\"\"\n-\n- def is_no_style_block(self, line):\n- \"\"\"Whether or not `line` introduces a block where styling should be ignore\"\"\"\n- if _re_code_block.search(line) is not None:\n- return True\n- if _re_textual_blocks.search(line) is not None:\n- return False\n- return _re_ignore.search(line) is not None\n-\n- def is_comment_or_textual_block(self, line):\n- \"\"\"Whether or not `line` introduces a block where styling should not be ignored (note, warnings...)\"\"\"\n- if _re_comment.search(line):\n- return True\n- return _re_textual_blocks.search(line) is not None\n-\n- def is_special_block(self, line):\n- \"\"\"Whether or not `line` introduces a special block.\"\"\"\n- if self.is_no_style_block(line):\n- self.in_block = SpecialBlock.NO_STYLE\n- return True\n- return False\n-\n- def init_in_block(self, text):\n- \"\"\"\n- Returns the initial value for `self.in_block`.\n-\n- Useful for some docstrings beginning inside an argument declaration block (all models).\n- \"\"\"\n- return SpecialBlock.NOT_SPECIAL\n-\n- def end_of_special_style(self, line):\n- \"\"\"\n- Sets back the `in_block` attribute to `NOT_SPECIAL`.\n-\n- Useful for some docstrings where we may have to go back 
to `ARG_LIST` instead.\n- \"\"\"\n- self.in_block = SpecialBlock.NOT_SPECIAL\n-\n- def style_paragraph(self, paragraph, max_len, no_style=False, min_indent=None):\n- \"\"\"\n- Style `paragraph` (a list of lines) by making sure no line goes over `max_len`, except if the `no_style` flag\n- is passed.\n- \"\"\"\n- if len(paragraph) == 0:\n- return \"\"\n- if no_style or self.in_block == SpecialBlock.NO_STYLE:\n- return \"\\n\".join(paragraph)\n- if _re_list.search(paragraph[0]) is not None:\n- # Great, we're in a list. So we need to split our paragraphs in smaller parts, one for each item.\n- result = \"\"\n- remainder = \"\"\n- prefix = _re_list.search(paragraph[0]).groups()[0]\n- prefix_indent = get_indent(paragraph[0])\n- current_item = [paragraph[0][len(prefix) :]]\n- for i, line in enumerate(paragraph[1:]):\n- new_item_search = _re_list.search(line)\n- indent = get_indent(line)\n- if len(indent) < len(prefix_indent) or (len(indent) == len(prefix_indent) and new_item_search is None):\n- # There might not be an empty line after the list, formatting the remainder recursively.\n- remainder = \"\\n\" + self.style_paragraph(\n- paragraph[i + 1 :], max_len, no_style=no_style, min_indent=min_indent\n- )\n- break\n- elif new_item_search is not None:\n- text = \" \".join([l.strip() for l in current_item])\n- result += split_text_in_lines(text, max_len, prefix, min_indent=min_indent) + \"\\n\"\n- prefix = new_item_search.groups()[0]\n- prefix_indent = indent\n- current_item = [line[len(prefix) :]]\n- else:\n- current_item.append(line)\n- # Treat the last item\n- text = \" \".join([l.strip() for l in current_item])\n- result += split_text_in_lines(text, max_len, prefix, min_indent=min_indent)\n- # Add the potential remainder\n- return result + remainder\n-\n- if len(paragraph) > 1 and self.is_comment_or_textual_block(paragraph[0]):\n- # Comments/notes in rst should be restyled with indentation, ignoring the first line.\n- indent = get_indent(paragraph[1])\n- text = \" \".join([l.strip() for l in paragraph[1:]])\n- return paragraph[0] + \"\\n\" + split_text_in_lines(text, max_len, indent, min_indent=min_indent)\n-\n- if self.in_block == SpecialBlock.ARG_LIST:\n- # Arg lists are special: we need to ignore the lines that are at the first indentation level beneath the\n- # Args/Parameters (parameter description), then we can style the indentation level beneath.\n- result = \"\"\n- # The args/parameters could be in that paragraph and should be ignored\n- if _re_arg_def.search(paragraph[0]) is not None:\n- if len(paragraph) == 1:\n- return paragraph[0]\n- result += paragraph[0] + \"\\n\"\n- paragraph = paragraph[1:]\n-\n- if self.current_indent is None:\n- self.current_indent = get_indent(paragraph[1])\n-\n- current_item = []\n- for line in paragraph:\n- if get_indent(line) == self.current_indent:\n- if len(current_item) > 0:\n- item_indent = get_indent(current_item[0])\n- text = \" \".join([l.strip() for l in current_item])\n- result += split_text_in_lines(text, max_len, item_indent, min_indent=min_indent) + \"\\n\"\n- result += line + \"\\n\"\n- current_item = []\n- else:\n- current_item.append(line)\n- if len(current_item) > 0:\n- item_indent = get_indent(current_item[0])\n- text = \" \".join([l.strip() for l in current_item])\n- result += split_text_in_lines(text, max_len, item_indent, min_indent=min_indent) + \"\\n\"\n- return result[:-1]\n-\n- indent = get_indent(paragraph[0])\n- text = \" \".join([l.strip() for l in paragraph])\n- return split_text_in_lines(text, max_len, indent, 
min_indent=min_indent)\n-\n- def style(self, text, max_len=119, min_indent=None):\n- \"\"\"Style `text` to `max_len`.\"\"\"\n- new_lines = []\n- paragraph = []\n- self.current_indent = \"\"\n- self.previous_indent = None\n- # If one of those is True, the paragraph should not be touched (code samples, lists...)\n- no_style = False\n- no_style_next = False\n- self.in_block = self.init_in_block(text)\n- # If this is True, we force-break a paragraph, even if there is no new empty line.\n- break_paragraph = False\n-\n- lines = text.split(\"\\n\")\n- last_line = None\n- for line in lines:\n- # New paragraph\n- line_is_empty = len(line.strip()) == 0\n- list_begins = (\n- _re_list.search(line) is not None\n- and last_line is not None\n- and len(get_indent(line)) > len(get_indent(last_line))\n- )\n- if line_is_empty or break_paragraph or list_begins:\n- if len(paragraph) > 0:\n- if self.in_block != SpecialBlock.NOT_SPECIAL:\n- indent = get_indent(paragraph[0])\n- # Are we still in a no-style block?\n- if self.current_indent is None:\n- # If current_indent is None, we haven't begun the interior of the block so the answer is\n- # yes, unless we have an indent of 0 in which case the special block took one line only.\n- if len(indent) == 0:\n- self.in_block = SpecialBlock.NOT_SPECIAL\n- else:\n- self.current_indent = indent\n- elif not indent.startswith(self.current_indent):\n- # If not, we are leaving the block when we unindent.\n- self.end_of_special_style(paragraph[0])\n-\n- if self.is_special_block(paragraph[0]):\n- # Maybe we are starting a special block.\n- if len(paragraph) > 1:\n- # If we have the interior of the block in the paragraph, we grab the indent.\n- self.current_indent = get_indent(paragraph[1])\n- else:\n- # We will determine the indent with the next paragraph\n- self.current_indent = None\n- styled_paragraph = self.style_paragraph(\n- paragraph, max_len, no_style=no_style, min_indent=min_indent\n- )\n- new_lines.append(styled_paragraph + \"\\n\")\n- else:\n- new_lines.append(\"\")\n-\n- paragraph = []\n- no_style = no_style_next\n- no_style_next = False\n- last_line = None\n- if (not break_paragraph and not list_begins) or line_is_empty:\n- break_paragraph = False\n- continue\n- break_paragraph = False\n-\n- # Title and section lines should go to the max + add a new paragraph.\n- if (\n- len(set(line)) == 1\n- and line[0] in TITLE_SPECIAL_CHARS\n- and last_line is not None\n- and len(line) >= len(last_line)\n- ):\n- line = line[0] * max_len\n- break_paragraph = True\n- # proper doc comment indicates the next paragraph should be no-style.\n- if _re_doc_ignore.search(line) is not None:\n- no_style_next = True\n- # Table are in just one paragraph and should be no-style.\n- if _re_table.search(line) is not None:\n- no_style = True\n- paragraph.append(line)\n- last_line = line\n-\n- # Just have to treat the last paragraph. 
It could still be in a no-style block (or not)\n- if len(paragraph) > 0:\n- # Are we still in a special block\n- # (if current_indent is None, we are but no need to set it since we are the end.)\n- if self.in_block != SpecialBlock.NO_STYLE and self.current_indent is not None:\n- indent = get_indent(paragraph[0])\n- if not indent.startswith(self.current_indent):\n- self.in_block = SpecialBlock.NOT_SPECIAL\n- _ = self.is_special_block(paragraph[0])\n- new_lines.append(self.style_paragraph(paragraph, max_len, no_style=no_style, min_indent=min_indent) + \"\\n\")\n- return \"\\n\".join(new_lines)\n-\n-\n-class DocstringStyler(CodeStyler):\n- \"\"\"Class to style docstrings that take the main method from `CodeStyler`.\"\"\"\n-\n- def is_no_style_block(self, line):\n- if _re_textual_blocks.search(line) is not None:\n- return False\n- if _re_example.search(line) is not None:\n- return True\n- return _re_code_block.search(line) is not None\n-\n- def is_comment_or_textual_block(self, line):\n- if _re_return.search(line) is not None:\n- self.in_block = SpecialBlock.NOT_SPECIAL\n- return True\n- return super().is_comment_or_textual_block(line)\n-\n- def is_special_block(self, line):\n- if self.is_no_style_block(line):\n- if self.previous_indent is None and self.in_block == SpecialBlock.ARG_LIST:\n- self.previous_indent = self.current_indent\n- self.in_block = SpecialBlock.NO_STYLE\n- return True\n- if _re_arg_def.search(line) is not None:\n- self.in_block = SpecialBlock.ARG_LIST\n- return True\n- return False\n-\n- def end_of_special_style(self, line):\n- if self.previous_indent is not None and line.startswith(self.previous_indent):\n- self.in_block = SpecialBlock.ARG_LIST\n- self.current_indent = self.previous_indent\n- else:\n- self.in_block = SpecialBlock.NOT_SPECIAL\n- self.previous_indent = None\n-\n- def init_in_block(self, text):\n- lines = text.split(\"\\n\")\n- while len(lines) > 0 and len(lines[0]) == 0:\n- lines = lines[1:]\n- if len(lines) == 0:\n- return SpecialBlock.NOT_SPECIAL\n- if re.search(r\":\\s*$\", lines[0]):\n- indent = get_indent(lines[0])\n- if (\n- len(lines) == 1\n- or len(get_indent(lines[1])) > len(indent)\n- or (len(get_indent(lines[1])) == len(indent) and re.search(r\":\\s*$\", lines[1]))\n- ):\n- self.current_indent = indent\n- return SpecialBlock.ARG_LIST\n- return SpecialBlock.NOT_SPECIAL\n-\n-\n-rst_styler = CodeStyler()\n-doc_styler = DocstringStyler()\n-\n-\n-def _add_new_lines_before_list(text):\n- \"\"\"Add a new empty line before a list begins.\"\"\"\n- lines = text.split(\"\\n\")\n- new_lines = []\n- in_list = False\n- for idx, line in enumerate(lines):\n- # Detect if the line is the start of a new list.\n- if _re_list.search(line) is not None and not in_list:\n- current_indent = get_indent(line)\n- in_list = True\n- # If the line before is non empty, add an extra new line.\n- if idx > 0 and len(lines[idx - 1]) != 0:\n- new_lines.append(\"\")\n- # Detect if we're out of the current list.\n- if in_list and not line.startswith(current_indent) and _re_list.search(line) is None:\n- in_list = False\n- new_lines.append(line)\n- return \"\\n\".join(new_lines)\n+def split_line_on_first_colon(line):\n+ splits = line.split(\":\")\n+ return splits[0], \":\".join(splits[1:])\n \n \n-def _add_new_lines_before_doc_special_words(text):\n- lines = text.split(\"\\n\")\n+def style_docstring(docstring, max_len):\n+ \"\"\"\n+ Style a docstring by making sure there is no useless whitespace and the maximum horizontal space is used.\n+\n+ Args:\n+ docstring (`str`): The docstring to 
style.\n+ max_len (`int`): The maximum length of each line.\n+\n+ Returns:\n+ `str`: The styled docstring\n+ \"\"\"\n+ lines = docstring.split(\"\\n\")\n new_lines = []\n+\n+ # Initialization\n+ current_paragraph = None\n+ current_indent = -1\n+ in_code = False\n+ param_indent = -1\n+ prefix = \"\"\n+ black_errors = []\n+\n+ # Special case for docstrings that begin with continuation of Args with no Args block.\n+ idx = 0\n+ while idx < len(lines) and is_empty_line(lines[idx]):\n+ idx += 1\n+ if (\n+ len(lines[idx]) > 1\n+ and lines[idx].rstrip().endswith(\":\")\n+ and find_indent(lines[idx + 1]) > find_indent(lines[idx])\n+ ):\n+ param_indent = find_indent(lines[idx])\n+\n for idx, line in enumerate(lines):\n- # Detect if the line is the start of a new list.\n- if _re_any_doc_special_word.search(line) is not None:\n- # If the line before is non empty, add an extra new line.\n- if idx > 0 and len(lines[idx - 1]) != 0:\n+ # Doing all re searches once for the one we need to repeat.\n+ list_search = _re_list.search(line)\n+ code_search = _re_code.search(line)\n+\n+ # Are we starting a new paragraph?\n+ # New indentation or new line:\n+ new_paragraph = find_indent(line) != current_indent or is_empty_line(line)\n+ # List item\n+ new_paragraph = new_paragraph or list_search is not None\n+ # Code block beginning\n+ new_paragraph = new_paragraph or code_search is not None\n+ # Beginning/end of tip\n+ new_paragraph = new_paragraph or _re_tip.search(line)\n+\n+ # In this case, we treat the current paragraph\n+ if not in_code and new_paragraph and current_paragraph is not None and len(current_paragraph) > 0:\n+ paragraph = \" \".join(current_paragraph)\n+ new_lines.append(format_text(paragraph, max_len, prefix=prefix, min_indent=current_indent))\n+ current_paragraph = None\n+\n+ if code_search is not None:\n+ if not in_code:\n+ current_paragraph = []\n+ current_indent = len(code_search.groups()[0])\n+ current_code = code_search.groups()[1]\n+ prefix = \"\"\n+ if current_indent < param_indent:\n+ param_indent = -1\n+ else:\n+ current_indent = -1\n+ code = \"\\n\".join(current_paragraph)\n+ if current_code in [\"py\", \"python\"]:\n+ formatted_code, error = format_code_example(code, max_len, in_docstring=True)\n+ new_lines.append(formatted_code)\n+ if len(error) > 0:\n+ black_errors.append(error)\n+ else:\n+ new_lines.append(code)\n+ current_paragraph = None\n+ new_lines.append(line)\n+ in_code = not in_code\n+\n+ elif in_code:\n+ current_paragraph.append(line)\n+ elif is_empty_line(line):\n+ current_paragraph = None\n+ current_indent = -1\n+ prefix = \"\"\n+ new_lines.append(line)\n+ elif list_search is not None:\n+ prefix = list_search.groups()[0]\n+ current_indent = len(prefix)\n+ current_paragraph = [line[current_indent:]]\n+ elif _re_args.search(line):\n+ new_lines.append(line)\n+ param_indent = find_indent(lines[idx + 1])\n+ elif _re_tip.search(line):\n+ # Add a new line before if not present\n+ if not is_empty_line(new_lines[-1]):\n new_lines.append(\"\")\n- new_lines.append(line)\n- return \"\\n\".join(new_lines)\n-\n+ new_lines.append(line)\n+ # Add a new line after if not present\n+ if idx < len(lines) - 1 and not is_empty_line(lines[idx + 1]):\n+ new_lines.append(\"\")\n+ elif current_paragraph is None or find_indent(line) != current_indent:\n+ indent = find_indent(line)\n+ # Special behavior for parameters intros.\n+ if indent == param_indent:\n+ # Special rules for some docstring where the Returns blocks has the same indent as the parameters.\n+ if _re_returns.search(line) is not None:\n+ 
param_indent = -1\n+ new_lines.append(line)\n+ elif len(line) < max_len:\n+ new_lines.append(line)\n+ else:\n+ intro, description = split_line_on_first_colon(line)\n+ new_lines.append(intro + \":\")\n+ if len(description) != 0:\n+ if find_indent(lines[idx + 1]) > indent:\n+ current_indent = find_indent(lines[idx + 1])\n+ else:\n+ current_indent = indent + 4\n+ current_paragraph = [description.strip()]\n+ prefix = \"\"\n+ else:\n+ # Check if we have exited the parameter block\n+ if indent < param_indent:\n+ param_indent = -1\n \n-def style_rst_file(doc_file, max_len=119, check_only=False):\n- \"\"\" Style one rst file `doc_file` to `max_len`.\"\"\"\n- with open(doc_file, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n- doc = f.read()\n+ current_paragraph = [line.strip()]\n+ current_indent = find_indent(line)\n+ prefix = \"\"\n+ elif current_paragraph is not None:\n+ current_paragraph.append(line.lstrip())\n \n- # Add missing new lines before lists\n- clean_doc = _add_new_lines_before_list(doc)\n- # Style\n- clean_doc = rst_styler.style(clean_doc, max_len=max_len)\n+ if current_paragraph is not None and len(current_paragraph) > 0:\n+ paragraph = \" \".join(current_paragraph)\n+ new_lines.append(format_text(paragraph, max_len, prefix=prefix, min_indent=current_indent))\n \n- diff = clean_doc != doc\n- if not check_only and diff:\n- print(f\"Overwriting content of {doc_file}.\")\n- with open(doc_file, \"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n- f.write(clean_doc)\n-\n- return diff\n-\n-\n-def style_docstring(docstring, max_len=119):\n- \"\"\"Style `docstring` to `max_len`.\"\"\"\n- # One-line docstring that are not too long are left as is.\n- if len(docstring) < max_len and \"\\n\" not in docstring:\n- return docstring\n-\n- # Grab the indent from the last line\n- last_line = docstring.split(\"\\n\")[-1]\n- # Is it empty except for the last triple-quotes (not-included in `docstring`)?\n- indent_search = re.search(r\"^(\\s*)$\", last_line)\n- if indent_search is not None:\n- indent = indent_search.groups()[0]\n- if len(indent) > 0:\n- docstring = docstring[: -len(indent)]\n- # Or are the triple quotes next to text (we will fix that).\n- else:\n- indent_search = _re_indent.search(last_line)\n- indent = indent_search.groups()[0] if indent_search is not None else \"\"\n+ return \"\\n\".join(new_lines), \"\\n\\n\".join(black_errors)\n \n- # Add missing new lines before Args/Returns etc.\n- docstring = _add_new_lines_before_doc_special_words(docstring)\n- # Add missing new lines before lists\n- docstring = _add_new_lines_before_list(docstring)\n- # Style\n- styled_doc = doc_styler.style(docstring, max_len=max_len, min_indent=indent)\n \n- # Add new lines if necessary\n- if not styled_doc.startswith(\"\\n\"):\n- styled_doc = \"\\n\" + styled_doc\n- if not styled_doc.endswith(\"\\n\"):\n- styled_doc += \"\\n\"\n- return styled_doc + indent\n+def style_docstrings_in_code(code, max_len=119):\n+ \"\"\"\n+ Style all docstrings in some code.\n \n+ Args:\n+ code (`str`): The code in which we want to style the docstrings.\n+ max_len (`int`): The maximum number of characters per line.\n \n-def style_file_docstrings(code_file, max_len=119, check_only=False):\n- \"\"\"Style all docstrings in `code_file` to `max_len`.\"\"\"\n- with open(code_file, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n- code = f.read()\n- splits = code.split('\"\"\"')\n+ Returns:\n+ `Tuple[str, str]`: A tuple with the clean code and the black errors (if any)\n+ \"\"\"\n+ # fmt: off\n+ splits = 
code.split('\\\"\\\"\\\"')\n splits = [\n (s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len))\n for i, s in enumerate(splits)\n ]\n- clean_code = '\"\"\"'.join(splits)\n+ black_errors = \"\\n\\n\".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0])\n+ splits = [s[0] if isinstance(s, tuple) else s for s in splits]\n+ clean_code = '\\\"\\\"\\\"'.join(splits)\n+ # fmt: on\n+\n+ return clean_code, black_errors\n+\n+\n+def style_file_docstrings(code_file, max_len=119, check_only=False):\n+ \"\"\"\n+ Style all docstrings in a given file.\n+\n+ Args:\n+ code_file (`str` or `os.PathLike`): The file in which we want to style the docstring.\n+ max_len (`int`): The maximum number of characters per line.\n+ check_only (`bool`, *optional*, defaults to `False`):\n+ Whether to restyle file or just check if they should be restyled.\n+\n+ Returns:\n+ `bool`: Whether or not the file was or should be restyled.\n+ \"\"\"\n+ with open(code_file, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n+ code = f.read()\n+\n+ clean_code, black_errors = style_docstrings_in_code(code, max_len=max_len)\n \n diff = clean_code != code\n if not check_only and diff:\n@@ -479,31 +419,122 @@ def style_file_docstrings(code_file, max_len=119, check_only=False):\n with open(code_file, \"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n f.write(clean_code)\n \n- return diff\n+ return diff, black_errors\n+\n+\n+def style_mdx_file(mdx_file, max_len=119, check_only=False):\n+ \"\"\"\n+ Style a MDX file by formatting all Python code samples.\n+\n+ Args:\n+ mdx_file (`str` or `os.PathLike`): The file in which we want to style the examples.\n+ max_len (`int`): The maximum number of characters per line.\n+ check_only (`bool`, *optional*, defaults to `False`):\n+ Whether to restyle file or just check if they should be restyled.\n+\n+ Returns:\n+ `bool`: Whether or not the file was or should be restyled.\n+ \"\"\"\n+ with open(mdx_file, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n+ content = f.read()\n+\n+ lines = content.split(\"\\n\")\n+ current_code = []\n+ current_language = \"\"\n+ in_code = False\n+ new_lines = []\n+ black_errors = []\n+\n+ for line in lines:\n+ if _re_code.search(line) is not None:\n+ in_code = not in_code\n+ if in_code:\n+ current_language = _re_code.search(line).groups()[1]\n+ current_code = []\n+ else:\n+ code = \"\\n\".join(current_code)\n+ if current_language in [\"py\", \"python\"]:\n+ code, error = format_code_example(code, max_len)\n+ if len(error) > 0:\n+ black_errors.append(error)\n+ new_lines.append(code)\n+\n+ new_lines.append(line)\n+ elif in_code:\n+ current_code.append(line)\n+ else:\n+ new_lines.append(line)\n+\n+ if in_code:\n+ raise ValueError(f\"There was a problem when styling {mdx_file}. 
A code block is opened without being closed.\")\n+\n+ clean_content = \"\\n\".join(new_lines)\n+ diff = clean_content != content\n+ if not check_only and diff:\n+ print(f\"Overwriting content of {mdx_file}.\")\n+ with open(mdx_file, \"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n+ f.write(clean_content)\n+\n+ return diff, \"\\n\\n\".join(black_errors)\n \n \n def style_doc_files(*files, max_len=119, check_only=False):\n \"\"\"\n- Style all `files` to `max_len` and fixes mistakes if not `check_only`, otherwise raises an error if styling should\n- be done.\n+ Applies doc styling or checks everything is correct in a list of files.\n+\n+ Args:\n+ files (several `str` or `os.PathLike`): The files to treat.\n+ max_len (`int`): The maximum number of characters per line.\n+ check_only (`bool`, *optional*, defaults to `False`):\n+ Whether to restyle file or just check if they should be restyled.\n+\n+ Returns:\n+ List[`str`]: The list of files changed or that should be restyled.\n \"\"\"\n changed = []\n+ black_errors = []\n for file in files:\n # Treat folders\n if os.path.isdir(file):\n files = [os.path.join(file, f) for f in os.listdir(file)]\n- files = [f for f in files if os.path.isdir(f) or f.endswith(\".rst\") or f.endswith(\".py\")]\n+ files = [f for f in files if os.path.isdir(f) or f.endswith(\".mdx\") or f.endswith(\".py\")]\n changed += style_doc_files(*files, max_len=max_len, check_only=check_only)\n- # Treat rst\n- elif file.endswith(\".rst\"):\n- if style_rst_file(file, max_len=max_len, check_only=check_only):\n- changed.append(file)\n+ # Treat mdx\n+ elif file.endswith(\".mdx\"):\n+ try:\n+ diff, black_error = style_mdx_file(file, max_len=max_len, check_only=check_only)\n+ if diff:\n+ changed.append(file)\n+ if len(black_error) > 0:\n+ black_errors.append(\n+ f\"There was a problem while formatting an example in {file} with black:\\m{black_error}\"\n+ )\n+ except Exception:\n+ print(f\"There is a problem in {file}.\")\n+ raise\n # Treat python files\n elif file.endswith(\".py\"):\n- if style_file_docstrings(file, max_len=max_len, check_only=check_only):\n- changed.append(file)\n+ try:\n+ diff, black_error = style_file_docstrings(file, max_len=max_len, check_only=check_only)\n+ if diff:\n+ changed.append(file)\n+ if len(black_error) > 0:\n+ black_errors.append(\n+ f\"There was a problem while formatting an example in {file} with black:\\m{black_error}\"\n+ )\n+ except Exception:\n+ print(f\"There is a problem in {file}.\")\n+ raise\n else:\n- warnings.warn(f\"Ignoring {file} because it's not a py or an rst file or a folder.\")\n+ warnings.warn(f\"Ignoring {file} because it's not a py or an mdx file or a folder.\")\n+ if len(black_errors) > 0:\n+ black_message = \"\\n\\n\".join(black_errors)\n+ raise ValueError(\n+ \"Some code examples can't be interpreted by black, which means they aren't regular python:\\n\\n\"\n+ + black_message\n+ + \"\\n\\nMake sure to fix the corresponding docstring or doc file, or remove the py/python after ``` if it \"\n+ + \"was not supposed to be a Python code sample.\"\n+ )\n return changed\n \n \n", "code_comments": [], "context": [ { "body": "_The documentation is not available anymore as the PR was closed or merged._", "from_author": false }, { "body": "Even if everything is pushed properly on `doc-build-dev`, there is nothing at https://moon-ci-docs.huggingface.co/docs/accelerate/pr_271. 
Guessing something needs to be activated on moon-landing @mishig25 ?", "from_author": true }, { "body": "Indeed, there needs to be a small update on moon-landing side. More[ info here](https://huggingface.slack.com/archives/C02GLJ5S0E9/p1646780697260569?thread_ts=1646410559.029439&cid=C02GLJ5S0E9)\r\n<img width=\"1000\" alt=\"Screenshot 2022-03-09 at 00 02 02\" src=\"https://user-images.githubusercontent.com/11827707/157340774-6481b81c-e5ea-460c-a395-632aad53783c.png\">\r\n", "from_author": false }, { "body": "I think `~` before `Accelerator` should not be there\r\n<img width=\"400\" alt=\"Screenshot 2022-03-09 at 00 06 33\" src=\"https://user-images.githubusercontent.com/11827707/157340963-a88cd88c-ac76-41aa-b1e2-71c6417f8130.png\">\r\n", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/271", "pr_id": 874536042 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 1254c19e9..0fd2a5701 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -22,7 +22,7 @@\n \n from packaging import version\n \n-from .checkpointing import load_accelerator_state, save_accelerator_state\n+from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state\n from .data_loader import prepare_data_loader\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler\n from .optimizer import AcceleratedOptimizer\n@@ -33,6 +33,7 @@\n convert_outputs_to_fp32,\n extract_model_from_parallel,\n gather,\n+ get_pretty_name,\n pad_across_processes,\n save,\n wait_for_everyone,\n@@ -188,6 +189,7 @@ def __init__(\n # Internal references to the training objects\n self._optimizers = []\n self._models = []\n+ self._custom_objects = []\n \n # RNG Types\n self.rng_types = rng_types\n@@ -564,7 +566,7 @@ def save(self, obj, f):\n \n def save_state(self, output_dir: str):\n \"\"\"\n- Saves the current states of the model, optimizer, scaler, and RNG generators.\n+ Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects.\n \n Args:\n output_dir (:obj:`str` or :obj:`os.PathLike`):\n@@ -575,11 +577,16 @@ def save_state(self, output_dir: str):\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving current state to {output_dir}\")\n weights = [self.get_state_dict(m) for m in self._models]\n- return save_accelerator_state(output_dir, weights, self._optimizers, self.state.process_index, self.scaler)\n+ save_location = save_accelerator_state(\n+ output_dir, weights, self._optimizers, self.state.process_index, self.scaler\n+ )\n+ for i, obj in enumerate(self._custom_objects):\n+ save_custom_state(obj, output_dir, i)\n+ return save_location\n \n def load_state(self, input_dir: str):\n \"\"\"\n- Loads the current states of the model, optimizer, scaler, and RNG generators.\n+ Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.\n \n Args:\n input_dir (:obj:`str` or :obj:`os.PathLike`):\n@@ -591,6 +598,16 @@ def load_state(self, input_dir: str):\n raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n logger.info(f\"Loading states from {input_dir}\")\n load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)\n+ custom_checkpoints = [f for f in os.listdir(input_dir) if \"custom_checkpoint\" in f]\n+ if len(custom_checkpoints) != len(self._custom_objects):\n+ err = 
\"Warning! Number of found checkpoints does not match the number of registered objects:\"\n+ err += f\"\\n\\tFound checkpoints: {len(custom_checkpoints)}\"\n+ err += f\"\\n\\tRegistered objects: {len(self._custom_objects)}\\nSkipping.\"\n+ logger.warn(err)\n+ else:\n+ logger.info(f\"Loading in {len(custom_checkpoints)} custom states\")\n+ for index, obj in enumerate(self._custom_objects):\n+ load_custom_state(obj, input_dir, index)\n \n def free_memory(self):\n \"\"\"\n@@ -646,6 +663,26 @@ def get_state_dict(self, model):\n \n return state_dict\n \n+ def register_for_checkpointing(self, *objects):\n+ \"\"\"\n+ Makes note of `objects` and will save or load them in during `save_state` or `load_state`.\n+\n+ These should be utilized when the state is being loaded or saved in the same script. It is not designed to be\n+ used in different scripts\n+\n+ Note: Every `object` must have a `load_state_dict` and `state_dict` function to be stored.\n+ \"\"\"\n+ invalid_objects = []\n+ for obj in objects:\n+ if not hasattr(obj, \"state_dict\") or not hasattr(obj, \"load_state_dict\"):\n+ invalid_objects.append(obj)\n+ if len(invalid_objects) > 0:\n+ err = \"All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:\"\n+ for index, obj in enumerate(invalid_objects):\n+ err += f\"\\n\\t- Item at index {index}, `{get_pretty_name(obj)}`\"\n+ raise ValueError(err)\n+ self._custom_objects.extend(objects)\n+\n @contextmanager\n def autocast(self):\n \"\"\"\ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nindex 37d68b90c..e1a28d108 100644\n--- a/src/accelerate/checkpointing.py\n+++ b/src/accelerate/checkpointing.py\n@@ -14,6 +14,7 @@\n \n import os\n import random\n+from pathlib import Path\n from typing import List\n \n import numpy as np\n@@ -21,7 +22,7 @@\n from torch.cuda.amp import GradScaler\n \n from .state import is_tpu_available\n-from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, get_pretty_name, save\n \n \n if is_tpu_available():\n@@ -132,3 +133,22 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=\n if is_tpu_available():\n xm.set_rng_state(states[\"xm_seed\"])\n logger.info(\"All random states loaded successfully\")\n+\n+\n+def save_custom_state(obj, path, index: int = 0):\n+ \"\"\"\n+ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`\n+ \"\"\"\n+ # Should this be the right way to get a qual_name type value from `obj`?\n+ save_location = Path(path) / f\"custom_checkpoint_{index}.pkl\"\n+ logger.info(f\"Saving the state of {get_pretty_name(obj)} to {save_location}\")\n+ torch.save(obj.state_dict(), save_location)\n+\n+\n+def load_custom_state(obj, path, index: int = 0):\n+ \"\"\"\n+ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`\n+ \"\"\"\n+ load_location = f\"{path}/custom_checkpoint_{index}.pkl\"\n+ logger.info(f\"Loading the state of {get_pretty_name(obj)} from {load_location}\")\n+ obj.load_state_dict(torch.load(load_location))\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 62b1291ee..4b20cc1c3 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -691,3 +691,16 @@ def patch_environment(**kwargs):\n \n for key in kwargs:\n del os.environ[key.upper()]\n+\n+\n+def get_pretty_name(obj):\n+ \"\"\"\n+ Gets a pretty name from ``obj``\n+ \"\"\"\n+ if not hasattr(obj, \"__qualname__\") and 
not hasattr(obj, \"__name__\"):\n+ obj = getattr(obj, \"__class__\", obj)\n+ if hasattr(obj, \"__qualname__\"):\n+ return obj.__qualname__\n+ if hasattr(obj, \"__name__\"):\n+ return obj.__name__\n+ return str(obj)\ndiff --git a/tests/test_state_checkpointing.py b/tests/test_state_checkpointing.py\nindex a74dcb724..87b2d3b06 100644\n--- a/tests/test_state_checkpointing.py\n+++ b/tests/test_state_checkpointing.py\n@@ -43,13 +43,13 @@ def get_dataset(n_batches):\n return (train_dataloader, valid_dataloader)\n \n \n-def train(num_epochs, model, dataloader, optimizer, accelerator):\n+def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):\n \"Trains for `num_epochs`\"\n rands = []\n for epoch in range(num_epochs):\n # Train quickly\n model.train()\n- for step, batch in enumerate(dataloader):\n+ for batch in dataloader:\n x, y = batch\n outputs = model(x)\n loss = torch.nn.functional.mse_loss(outputs, y)\n@@ -57,6 +57,8 @@ def train(num_epochs, model, dataloader, optimizer, accelerator):\n optimizer.step()\n optimizer.zero_grad()\n rands.append(random.random()) # Introduce some randomness\n+ if scheduler is not None:\n+ scheduler.step()\n return rands\n \n \n@@ -123,3 +125,41 @@ def test_can_resume_training(self):\n self.assertEqual(b1, b3)\n self.assertEqual(opt_state1, opt_state3)\n self.assertEqual(ground_truth_rands, test_rands)\n+\n+ def test_invalid_registration(self):\n+ t = torch.tensor([1, 2, 3])\n+ t1 = torch.tensor([2, 3, 4])\n+ net = DummyModel()\n+ opt = torch.optim.Adam(net.parameters())\n+ accelerator = Accelerator()\n+ with self.assertRaises(ValueError) as ve:\n+ accelerator.register_for_checkpointing(t, t1, net, opt)\n+ message = str(ve.exception)\n+ self.assertTrue(\"Item at index 0\" in message)\n+ self.assertTrue(\"Item at index 1\" in message)\n+ self.assertFalse(\"Item at index 2\" in message)\n+ self.assertFalse(\"Item at index 3\" in message)\n+\n+ def test_with_scheduler(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator()\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ accelerator.register_for_checkpointing(scheduler)\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ scheduler_state = scheduler.state_dict()\n+ train(3, model, train_dataloader, optimizer, accelerator, scheduler)\n+ self.assertNotEqual(scheduler_state, scheduler.state_dict())\n+\n+ # Load everything back in and make sure all states work\n+ accelerator.load_state(initial)\n+ self.assertEqual(scheduler_state, scheduler.state_dict())\n", "code_comments": [ { "body": "Why the +1 here?", "diff_hunk": "@@ -591,6 +597,14 @@ def load_state(self, input_dir: str):\n raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n logger.info(f\"Loading states from {input_dir}\")\n load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)\n+ custom_checkpoints = [f for f in os.listdir(input_dir) if \"custom_checkpoint\" in f]\n+ if len(custom_checkpoints) != (len(self._custom_objects) + 1):", "from_author": false }, { "body": "I would append them to the existing `_custom_objects`.", 
"diff_hunk": "@@ -646,6 +660,15 @@ def get_state_dict(self, model):\n \n return state_dict\n \n+ def register_for_checkpointing(self, *objects):\n+ \"\"\"\n+ Makes note of `objects` and will save or load them in during `save_state` or `load_state`.\n+\n+ Note: These should be utilized when the state is being loaded or saved in the same script. It is not designed\n+ to be used in different scripts\n+ \"\"\"\n+ self._custom_objects = list(objects)", "from_author": false }, { "body": "You should use a Path or `os.path.join` here otherwise it won't work in Windows.", "diff_hunk": "@@ -132,3 +132,27 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=\n if is_tpu_available():\n xm.set_rng_state(states[\"xm_seed\"])\n logger.info(\"All random states loaded successfully\")\n+\n+\n+def save_custom_state(obj, path, index: int = 0):\n+ \"\"\"\n+ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`\n+ \"\"\"\n+ # Should this be the right way to get a qual_name type value from `obj`?\n+ save_location = f\"{path}/custom_checkpoint_{index}.pkl\"", "from_author": false }, { "body": "We don't need a torch.load inside here?", "diff_hunk": "@@ -132,3 +132,27 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=\n if is_tpu_available():\n xm.set_rng_state(states[\"xm_seed\"])\n logger.info(\"All random states loaded successfully\")\n+\n+\n+def save_custom_state(obj, path, index: int = 0):\n+ \"\"\"\n+ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`\n+ \"\"\"\n+ # Should this be the right way to get a qual_name type value from `obj`?\n+ save_location = f\"{path}/custom_checkpoint_{index}.pkl\"\n+ logger.info(f\"Saving the state of {str(obj)} to {save_location}\")\n+ if hasattr(obj, \"state_dict\"):\n+ obj = obj.state_dict()\n+ torch.save(obj, save_location)\n+\n+\n+def load_custom_state(obj, path, index: int = 0):\n+ \"\"\"\n+ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`\n+ \"\"\"\n+ load_location = f\"{path}/custom_checkpoint_{index}.pkl\"\n+ if hasattr(obj, \"load_state_dict\"):\n+ obj = obj.load_state_dict(load_location)", "from_author": false }, { "body": "For the custom objects + the saved objected was my thought at the time. But you're write its a bug", "diff_hunk": "@@ -591,6 +597,14 @@ def load_state(self, input_dir: str):\n raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n logger.info(f\"Loading states from {input_dir}\")\n load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)\n+ custom_checkpoints = [f for f in os.listdir(input_dir) if \"custom_checkpoint\" in f]\n+ if len(custom_checkpoints) != (len(self._custom_objects) + 1):", "from_author": true }, { "body": "The warning message does not match what the test is testing :-)", "diff_hunk": "@@ -591,6 +598,13 @@ def load_state(self, input_dir: str):\n raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n logger.info(f\"Loading states from {input_dir}\")\n load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)\n+ custom_checkpoints = [f for f in os.listdir(input_dir) if \"custom_checkpoint\" in f]\n+ if len(custom_checkpoints) != len(self._custom_objects):\n+ logger.warn(\"Warning! 
You are loading a state that does not include registered objects.\")", "from_author": false }, { "body": "This is not a proper docstring **but** it will be once #271 is merged, so giving you a free pass ;-)", "diff_hunk": "@@ -646,6 +660,26 @@ def get_state_dict(self, model):\n \n return state_dict\n \n+ def register_for_checkpointing(self, *objects):\n+ \"\"\"\n+ Makes note of `objects` and will save or load them in during `save_state` or `load_state`.\n+\n+ These should be utilized when the state is being loaded or saved in the same script. It is not designed to be\n+ used in different scripts\n+\n+ Note: Every `object` must have a `load_state_dict` and `state_dict` function to be stored.", "from_author": false }, { "body": "```suggestion\r\n torch.save(obj.state_dict(), save_location)\r\n```\r\nCan be combined now.", "diff_hunk": "@@ -132,3 +133,23 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=\n if is_tpu_available():\n xm.set_rng_state(states[\"xm_seed\"])\n logger.info(\"All random states loaded successfully\")\n+\n+\n+def save_custom_state(obj, path, index: int = 0):\n+ \"\"\"\n+ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`\n+ \"\"\"\n+ # Should this be the right way to get a qual_name type value from `obj`?\n+ save_location = Path(path) / f\"custom_checkpoint_{index}.pkl\"\n+ logger.info(f\"Saving the state of {get_pretty_name(obj)} to {save_location}\")\n+ obj = obj.state_dict()\n+ torch.save(obj, save_location)", "from_author": false }, { "body": "Not sure we need a new class for this. Can be included in the current one!", "diff_hunk": "@@ -123,3 +125,43 @@ def test_can_resume_training(self):\n self.assertEqual(b1, b3)\n self.assertEqual(opt_state1, opt_state3)\n self.assertEqual(ground_truth_rands, test_rands)\n+\n+\n+class CustomItemsTest(unittest.TestCase):", "from_author": false }, { "body": "Can you print the two numbers?", "diff_hunk": "@@ -600,7 +600,9 @@ def load_state(self, input_dir: str):\n load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)\n custom_checkpoints = [f for f in os.listdir(input_dir) if \"custom_checkpoint\" in f]\n if len(custom_checkpoints) != len(self._custom_objects):\n- logger.warn(\"Warning! You are loading a state that does not include registered objects.\")\n+ logger.warn(\n+ \"Warning! Number of found checkpoints does not match the number of registered objects. Skipping.\"", "from_author": false }, { "body": "How does \r\n```python\r\n err = \"Warning! Number of found checkpoints does not match the number of registered objects:\"\r\n err += f\"\\n\\tFound checkpoints: {len(custom_checkpoints)}\"\r\n err += f\"\\n\\tRegistered objects: {len(self._custom_objects)}\\nSkipping.\"\r\n```\r\nSound?", "diff_hunk": "@@ -600,7 +600,9 @@ def load_state(self, input_dir: str):\n load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)\n custom_checkpoints = [f for f in os.listdir(input_dir) if \"custom_checkpoint\" in f]\n if len(custom_checkpoints) != len(self._custom_objects):\n- logger.warn(\"Warning! You are loading a state that does not include registered objects.\")\n+ logger.warn(\n+ \"Warning! Number of found checkpoints does not match the number of registered objects. 
Skipping.\"", "from_author": true } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/270", "pr_id": 873453450 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 1254c19e9..f59d3e8bd 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -166,6 +166,10 @@ def __init__(\n self.device_placement = device_placement\n self.split_batches = split_batches\n self.dispatch_batches = dispatch_batches\n+ if dispatch_batches is True and version.parse(torch.__version__) < version.parse(\"1.8.0\"):\n+ raise ImportError(\n+ \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\n+ )\n \n # Mixed precision attributes\n self.scaler = None\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex b28f24fc6..cc46e5577 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -328,6 +328,10 @@ class DataLoaderDispatcher(DataLoader):\n def __init__(self, dataset, split_batches: bool = False, **kwargs):\n super().__init__(dataset, **kwargs)\n self.split_batches = split_batches\n+ if version.parse(torch.__version__) < version.parse(\"1.8.0\"):\n+ raise ImportError(\n+ \"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}.\"\n+ )\n \n def __iter__(self):\n state = AcceleratorState()\n@@ -474,7 +478,10 @@ def prepare_data_loader(\n This does not support :obj:`BatchSampler` with varying batch size yet.\n \"\"\"\n if dispatch_batches is None:\n- dispatch_batches = False if not put_on_device else isinstance(dataloader.dataset, IterableDataset)\n+ if version.parse(torch.__version__) < version.parse(\"1.8.0\") or not put_on_device:\n+ dispatch_batches = False\n+ else:\n+ dispatch_batches = isinstance(dataloader.dataset, IterableDataset)\n \n if dispatch_batches and not put_on_device:\n raise ValueError(\"Using `dispatch_batches=True` requires `put_on_device=True`.\")\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/269", "pr_id": 873301616 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 2d9937642..62b1291ee 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -63,13 +63,16 @@ class TensorInformation:\n dtype: torch.dtype\n \n \n-def set_seed(seed: int):\n+def set_seed(seed: int, device_specific: bool = False):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.\n \n Args:\n seed (:obj:`int`): The seed to set.\n+ device_specific (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to differ the seed on each device slightly with `self.process_index`\n \"\"\"\n+ if device_specific:\n+ seed += AcceleratorState().process_index\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n", "code_comments": [ { "body": "```suggestion\r\n device_specific (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to differ the seed on each device slightly with `self.process_index`\r\n```", "diff_hunk": "@@ -63,13 +63,16 @@ class TensorInformation:\n dtype: torch.dtype\n \n \n-def set_seed(seed: int):\n+def set_seed(seed: int, device_specific: bool = False):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.\n \n Args:\n seed (:obj:`int`): The seed to set.\n+ device_specific (:obj:`bool`): Whether to differ the seed on each device slightly with 
`self.process_index`", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/266", "pr_id": 869844938 }, { "diff": "diff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex a029acfae..e8c910e19 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -256,11 +256,6 @@ def __init__(\n \n def __repr__(self):\n mixed_precision = self.mixed_precision\n- if self.distributed_type == DistributedType.DEEPSPEED:\n- if self.deepspeed_plugin.fp16:\n- mixed_precision = \"fp16\"\n- if self.deepspeed_plugin.bflaot16:\n- mixed_precision = \"bf16\"\n \n repr = (\n f\"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\\n\"\n@@ -271,5 +266,5 @@ def __repr__(self):\n f\"Mixed precision type: {mixed_precision}\\n\"\n )\n if self.distributed_type == DistributedType.DEEPSPEED:\n- repr += f\"ds_config: {self.deepspeed_plugin.ds_config}\\n\"\n+ repr += f\"ds_config: {self.deepspeed_plugin.deepspeed_config}\\n\"\n return repr\ndiff --git a/src/accelerate/test_utils/test_script.py b/src/accelerate/test_utils/test_script.py\nindex ecd8e4f02..f9a99e22f 100644\n--- a/src/accelerate/test_utils/test_script.py\n+++ b/src/accelerate/test_utils/test_script.py\n@@ -59,6 +59,8 @@ def dl_preparation_check():\n for batch in dl:\n result.append(gather(batch))\n result = torch.cat(result)\n+\n+ print(state.process_index, result, type(dl))\n assert torch.equal(result.cpu(), torch.arange(0, length).long()), \"Wrong non-shuffled dataloader result.\"\n \n dl = DataLoader(range(length), batch_size=8)\n@@ -327,6 +329,10 @@ def main():\n dl_preparation_check()\n central_dl_preparation_check()\n \n+ # Trainings are not exactly the same in DeepSpeed and CPU mode\n+ if state.distributed_type == DistributedType.DEEPSPEED:\n+ return\n+\n if state.local_process_index == 0:\n print(\"\\n**Training integration test**\")\n training_check()\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 83a40bf86..de5978802 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -91,7 +91,7 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n- elif state.distributed_type == DistributedType.MULTI_GPU:\n+ elif state.distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n rng_state = rng_state.cpu()\n@@ -343,7 +343,7 @@ def gather(tensor):\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n- elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n+ elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n return _gpu_gather(tensor)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _cpu_gather(tensor)\n@@ -376,7 +376,7 @@ def gather_object(object: Any):\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n raise NotImplementedError(\"gather objects in TPU is not supported\")\n- elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n+ elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n return _gpu_gather_object(object)\n elif 
AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _cpu_gather_object(object)\n@@ -415,7 +415,7 @@ def broadcast(tensor, from_process: int = 0):\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_broadcast(tensor, src=from_process, name=\"accelerate.utils.broadcast\")\n- elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n+ elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n return _gpu_broadcast(tensor, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _gpu_broadcast(tensor, src=from_process)\n@@ -439,7 +439,7 @@ def broadcast_object_list(object_list, from_process: int = 0):\n if AcceleratorState().distributed_type == DistributedType.TPU:\n for i, obj in enumerate(object_list):\n object_list[i] = xm.mesh_reduce(\"accelerate.utils.broadcast_object_list\", obj, lambda x: x[from_process])\n- elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n+ elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n torch.distributed.broadcast_object_list(object_list, src=from_process)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n torch.distributed.broadcast_object_list(object_list, src=from_process)\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/264", "pr_id": 868226627 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 534ea1526..15eaaecb6 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -52,6 +52,17 @@ def get_cluster_input():\n lambda x: int(x),\n )\n \n+ if distributed_type == DistributedType.NO:\n+ use_cpu = _ask_field(\n+ \"Do you want to run your training on CPU only (even if a GPU is available)? 
[no]:\",\n+ lambda x: bool(x),\n+ default=False,\n+ )\n+ elif distributed_type == DistributedType.MULTI_CPU:\n+ use_cpu = True\n+ else:\n+ use_cpu = False\n+\n deepspeed_config = None\n if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n use_deepspeed = _ask_field(\n@@ -122,4 +133,5 @@ def get_cluster_input():\n main_process_port=main_process_port,\n main_training_function=main_training_function,\n deepspeed_config=deepspeed_config,\n+ use_cpu=use_cpu,\n )\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 891580ab5..5af912fc3 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -67,6 +67,7 @@ class BaseConfig:\n compute_environment: ComputeEnvironment\n distributed_type: Union[DistributedType, SageMakerDistributedType]\n mixed_precision: str\n+ use_cpu: bool\n \n def to_dict(self):\n result = self.__dict__\n@@ -87,6 +88,8 @@ def from_json_file(cls, json_file=None):\n config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n if \"fp16\" in config_dict: # Convert the config to the new format.\n del config_dict[\"fp16\"]\n+ if \"use_cpu\" not in config_dict:\n+ config_dict[\"use_cpu\"] = False\n return cls(**config_dict)\n \n def to_json_file(self, json_file):\n@@ -106,6 +109,8 @@ def from_yaml_file(cls, yaml_file=None):\n config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n if \"fp16\" in config_dict: # Convert the config to the new format.\n del config_dict[\"fp16\"]\n+ if \"use_cpu\" not in config_dict:\n+ config_dict[\"use_cpu\"] = False\n \n return cls(**config_dict)\n \ndiff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\nindex a8429db96..a47eb9a4c 100644\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -171,6 +171,7 @@ def debug_launcher(function, args=(), num_processes=2):\n master_port=\"29500\",\n mixed_precision=\"no\",\n accelerate_debug_rdv_file=tmp_file.name,\n+ use_cpu=\"yes\",\n ):\n launcher = PrepareForLaunch(function, debug=True)\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 59aebef2d..a029acfae 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -153,6 +153,8 @@ def __init__(\n **kwargs,\n ):\n self.__dict__ = self._shared_state\n+ if parse_flag_from_env(\"USE_CPU\"):\n+ cpu = True\n if not getattr(self, \"initialized\", False):\n self.backend = None\n self.deepspeed_plugin = None\n@@ -217,14 +219,7 @@ def __init__(\n rank = get_int_from_env([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n size = get_int_from_env([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n local_rank = get_int_from_env(\n- [\n- \"LOCAL_RANK\",\n- \"CPU_LOCAL_RANK\",\n- \"MPI_LOCALRANKID\",\n- \"OMPI_COMM_WORLD_LOCAL_RANK\",\n- \"MV2_COMM_WORLD_LOCAL_RANK\",\n- ],\n- 0,\n+ [\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0\n )\n local_size = get_int_from_env(\n [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 6eba4b295..83a40bf86 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -614,8 +614,6 @@ def __call__(self, 
index, *args):\n store=torch.distributed.FileStore(rdv_file, world_size),\n world_size=world_size,\n )\n- # Prepare the environment for torch.distributed\n- os.environ[\"CPU_LOCAL_RANK\"] = str(index)\n elif self.distributed_type == DistributedType.MULTI_GPU or self.distributed_type == DistributedType.MULTI_CPU:\n # Prepare the environment for torch.distributed\n os.environ[\"LOCAL_RANK\"] = str(index)\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/263", "pr_id": 868155001 }, { "diff": "diff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex e0b96cd10..f77a26a56 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -6,6 +6,6 @@\n \n from .accelerator import Accelerator\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs\n-from .notebook_launcher import notebook_launcher\n+from .launchers import debug_launcher, notebook_launcher\n from .state import DistributedType\n from .utils import DeepSpeedPlugin, synchronize_rng_states\ndiff --git a/src/accelerate/notebook_launcher.py b/src/accelerate/launchers.py\nsimilarity index 75%\nrename from src/accelerate/notebook_launcher.py\nrename to src/accelerate/launchers.py\nindex 90b4f826a..a8429db96 100644\n--- a/src/accelerate/notebook_launcher.py\n+++ b/src/accelerate/launchers.py\n@@ -14,6 +14,7 @@\n \n import os\n import sys\n+import tempfile\n import warnings\n \n import torch\n@@ -21,7 +22,7 @@\n from packaging import version\n \n from .state import AcceleratorState\n-from .utils import PrepareForLaunch\n+from .utils import PrepareForLaunch, patch_environment\n \n \n def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\n@@ -106,12 +107,6 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n \"function.\"\n )\n \n- # torch.distributed will expect a few environment variable to be here. We set the ones common to each\n- # process here (the other ones will be set be the launcher).\n- os.environ[\"WORLD_SIZE\"] = str(num_processes)\n- os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n- os.environ[\"MASTER_PORT\"] = str(use_port)\n-\n mixed_precision = mixed_precision.lower()\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n raise ValueError(\n@@ -122,17 +117,15 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n warnings.warn('use_fp16=True is deprecated. Use mixed_precision=\"fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n \n- os.environ[\"MIXED_PRECISION\"] = str(mixed_precision)\n+ # torch.distributed will expect a few environment variable to be here. 
We set the ones common to each\n+ # process here (the other ones will be set be the launcher).\n+ with patch_environment(\n+ world_size=num_processes, master_addr=\"127.0.01\", master_port=use_port, mixed_precision=mixed_precision\n+ ):\n+ launcher = PrepareForLaunch(function, distributed_type=\"MULTI_GPU\")\n \n- launcher = PrepareForLaunch(function, distributed_type=\"MULTI_GPU\")\n- try:\n print(f\"Launching training on {num_processes} GPUs.\")\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n- finally:\n- # Clean up the environment variables set.\n- del os.environ[\"WORLD_SIZE\"]\n- del os.environ[\"MASTER_ADDR\"]\n- del os.environ[\"MASTER_PORT\"]\n \n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n@@ -141,3 +134,43 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mix\n else:\n print(\"Launching training on CPU.\")\n function(*args)\n+\n+\n+def debug_launcher(function, args=(), num_processes=2):\n+ \"\"\"\n+ Launches a training function using several processes on CPU for debugging purposes.\n+\n+ .. warning::\n+\n+ This function is provided for internal testing and debugging, but it's not intended for real trainings. It will\n+ only use the CPU.\n+\n+\n+ Args:\n+ function (:obj:`Callable`):\n+ The training function to execute.\n+ args (:obj:`Tuple`):\n+ Tuple of arguments to pass to the function (it will receive :obj:`*args`).\n+ num_processes (:obj:`int`, *optional*, defaults to 2):\n+ The number of processes to use for training.\n+ \"\"\"\n+ if version.parse(torch.__version__) < version.parse(\"1.5.0\"):\n+ raise ImportError(\n+ \"Using `debug_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\n+ f\"{torch.__version__}.\"\n+ )\n+\n+ from torch.multiprocessing import start_processes\n+\n+ with tempfile.NamedTemporaryFile() as tmp_file:\n+ # torch.distributed will expect a few environment variable to be here. 
We set the ones common to each\n+ # process here (the other ones will be set be the launcher).\n+ with patch_environment(\n+ world_size=num_processes,\n+ master_addr=\"127.0.01\",\n+ master_port=\"29500\",\n+ mixed_precision=\"no\",\n+ accelerate_debug_rdv_file=tmp_file.name,\n+ ):\n+ launcher = PrepareForLaunch(function, debug=True)\n+ start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 6693a34ca..59aebef2d 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -217,7 +217,14 @@ def __init__(\n rank = get_int_from_env([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n size = get_int_from_env([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n local_rank = get_int_from_env(\n- [\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0\n+ [\n+ \"LOCAL_RANK\",\n+ \"CPU_LOCAL_RANK\",\n+ \"MPI_LOCALRANKID\",\n+ \"OMPI_COMM_WORLD_LOCAL_RANK\",\n+ \"MV2_COMM_WORLD_LOCAL_RANK\",\n+ ],\n+ 0,\n )\n local_size = get_int_from_env(\n [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 36eaea592..443c55622 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -16,6 +16,7 @@\n import os\n import random\n from collections.abc import Mapping\n+from contextlib import contextmanager\n from dataclasses import dataclass, field\n from enum import Enum\n from typing import Any, List, Optional, Union\n@@ -594,14 +595,28 @@ class PrepareForLaunch:\n The function to launch.\n distributed_type (:class:`~accelerate.state.DistributedType`):\n The distributed type to prepare for.\n+ debug (:obj:`bool`, *optional*, defaults to :obj:`False`):\n+ Whether or not this is a debug launch.\n \"\"\"\n \n- def __init__(self, launcher, distributed_type=\"NO\"):\n+ def __init__(self, launcher, distributed_type=\"NO\", debug=False):\n self.launcher = launcher\n self.distributed_type = DistributedType(distributed_type)\n+ self.debug = debug\n \n def __call__(self, index, *args):\n- if self.distributed_type == DistributedType.MULTI_GPU or self.distributed_type == DistributedType.MULTI_CPU:\n+ if self.debug:\n+ world_size = int(os.environ.get(\"WORLD_SIZE\"))\n+ rdv_file = os.environ.get(\"ACCELERATE_DEBUG_RDV_FILE\")\n+ torch.distributed.init_process_group(\n+ \"gloo\",\n+ rank=index,\n+ store=torch.distributed.FileStore(rdv_file, world_size),\n+ world_size=world_size,\n+ )\n+ # Prepare the environment for torch.distributed\n+ os.environ[\"CPU_LOCAL_RANK\"] = str(index)\n+ elif self.distributed_type == DistributedType.MULTI_GPU or self.distributed_type == DistributedType.MULTI_CPU:\n # Prepare the environment for torch.distributed\n os.environ[\"LOCAL_RANK\"] = str(index)\n os.environ[\"RANK\"] = str(index)\n@@ -654,3 +669,19 @@ def __post_init__(self):\n \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n \"zero_allow_untested_optimizer\": True,\n }\n+\n+\n+@contextmanager\n+def patch_environment(**kwargs):\n+ \"\"\"\n+ A context manager that will add each keyword argument passed to ``os.environ`` and remove them when exiting.\n+\n+ Will convert the values in :obj:`kwargs` to strings and upper-case all the keys.\n+ \"\"\"\n+ for key, value in kwargs.items():\n+ os.environ[key.upper()] = str(value)\n+\n+ yield\n+\n+ for key in kwargs:\n+ del 
os.environ[key.upper()]\ndiff --git a/tests/test_cpu.py b/tests/test_cpu.py\nnew file mode 100644\nindex 000000000..72a8b2e7d\n--- /dev/null\n+++ b/tests/test_cpu.py\n@@ -0,0 +1,23 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+\n+from accelerate import debug_launcher\n+from accelerate.test_utils import test_script\n+\n+\n+class MultiTPUTester(unittest.TestCase):\n+ def test_cpu(self):\n+ debug_launcher(test_script.main)\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nindex e515df8fe..a31c45d31 100644\n--- a/tests/test_utils.py\n+++ b/tests/test_utils.py\n@@ -12,12 +12,13 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import os\n import unittest\n from collections import UserDict, namedtuple\n \n import torch\n \n-from accelerate.utils import send_to_device\n+from accelerate.utils import patch_environment, send_to_device\n \n \n TestNamedTuple = namedtuple(\"TestNamedTuple\", \"a b c\")\n@@ -62,3 +63,11 @@ def test_send_to_device(self):\n self.assertTrue(torch.equal(result4[\"b\"][0].cpu(), tensor))\n self.assertTrue(torch.equal(result4[\"b\"][1].cpu(), tensor))\n self.assertEqual(result4[\"c\"], 1)\n+\n+ def test_patch_environment(self):\n+ with patch_environment(aa=1, BB=2):\n+ self.assertEqual(os.environ.get(\"AA\"), \"1\")\n+ self.assertEqual(os.environ.get(\"BB\"), \"2\")\n+\n+ self.assertNotIn(\"AA\", os.environ)\n+ self.assertNotIn(\"BB\", os.environ)\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/259", "pr_id": 862497675 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 77db4546b..ff1b148f4 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -92,6 +92,17 @@ def launch_command_parser(subparsers=None):\n default=None,\n help=\"The name of the main function to be executed in your script (only for TPU training).\",\n )\n+ parser.add_argument(\n+ \"-m\",\n+ \"--module\",\n+ action=\"store_true\",\n+ help=\"Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.\",\n+ )\n+ parser.add_argument(\n+ \"--no_python\",\n+ action=\"store_true\",\n+ help=\"Skip prepending the training script with 'python' - just execute it directly. 
Useful when the script is not a Python script.\",\n+ )\n parser.add_argument(\n \"--aws_access_key_id\",\n type=str,\n@@ -140,7 +151,14 @@ def launch_command_parser(subparsers=None):\n \n \n def simple_launcher(args):\n- cmd = [sys.executable, args.training_script]\n+ cmd = []\n+ if args.no_python and args.module:\n+ raise ValueError(\"--module and --no_python cannot be used together\")\n+ if not args.no_python:\n+ cmd.append(sys.executable)\n+ if args.module:\n+ cmd.append(\"-m\")\n+ cmd.append(args.training_script)\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n@@ -163,8 +181,7 @@ def simple_launcher(args):\n \n \n def multi_gpu_launcher(args):\n- cmd = [sys.executable, \"-m\", \"torch.distributed.launch\"]\n- cmd.extend([\"--use_env\"])\n+ cmd = [sys.executable, \"-m\", \"torch.distributed.launch\", \"--use_env\"]\n if args.num_machines > 1:\n cmd.extend(\n [\n@@ -184,6 +201,13 @@ def multi_gpu_launcher(args):\n cmd.extend([\"--nproc_per_node\", str(args.num_processes)])\n if args.main_process_port is not None:\n cmd.extend([\"--master_port\", str(args.main_process_port)])\n+\n+ if args.module and args.no_python:\n+ raise ValueError(\"--module and --no_python cannot be used together\")\n+ elif args.module:\n+ cmd.append(\"--module\")\n+ elif args.no_python:\n+ cmd.append(\"--no_python\")\n cmd.append(args.training_script)\n cmd.extend(args.training_script_args)\n \n@@ -206,7 +230,6 @@ def multi_gpu_launcher(args):\n \n \n def deepspeed_launcher(args):\n-\n cmd = [\"deepspeed\"]\n if args.num_machines > 1:\n cmd.extend(\n@@ -226,6 +249,12 @@ def deepspeed_launcher(args):\n else:\n cmd.extend([\"--num_gpus\", str(args.num_processes)])\n \n+ if args.module and args.no_python:\n+ raise ValueError(\"--module and --no_python cannot be used together\")\n+ elif args.module:\n+ cmd.append(\"--module\")\n+ elif args.no_python:\n+ cmd.append(\"--no_python\")\n cmd.append(args.training_script)\n cmd.extend(args.training_script_args)\n \n@@ -254,21 +283,28 @@ def deepspeed_launcher(args):\n def tpu_launcher(args):\n import torch_xla.distributed.xla_multiprocessing as xmp\n \n- # Import training_script as a module.\n- script_path = Path(args.training_script)\n- sys.path.append(str(script_path.parent.resolve()))\n- mod_name = script_path.stem\n+ if args.no_python:\n+ raise ValueError(\"--no_python cannot be used with TPU launcher\")\n+\n+ if args.module:\n+ mod_name = args.training_script\n+ else:\n+ # Import training_script as a module\n+ script_path = Path(args.training_script)\n+ sys.path.append(str(script_path.parent.resolve()))\n+ mod_name = script_path.stem\n+\n mod = importlib.import_module(mod_name)\n if not hasattr(mod, args.main_training_function):\n raise ValueError(\n f\"Your training script should have a function named {args.main_training_function}, or you should pass a \"\n \"different value to `--main_training_function`.\"\n )\n- main_function = getattr(mod, args.main_training_function)\n \n # Patch sys.argv\n- sys.argv = [args.training_script] + args.training_script_args\n+ sys.argv = [mod.__file__] + args.training_script_args\n \n+ main_function = getattr(mod, args.main_training_function)\n xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)\n \n \n@@ -319,6 +355,11 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n raise ImportError(\n \"Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`\"\n )\n+ if args.module or args.no_python:\n+ raise 
ValueError(\n+ \"SageMaker requires a python training script file and cannot be used with --module or --no_python\"\n+ )\n+\n from sagemaker.huggingface import HuggingFace\n \n # configure environment\n", "code_comments": [ { "body": "Since both can't be true at the same time, the test should not be nested.\r\n```suggestion\r\n elif args.module:\r\n cmd.append(\"-m\")\r\n```", "diff_hunk": "@@ -140,7 +151,14 @@ def launch_command_parser(subparsers=None):\n \n \n def simple_launcher(args):\n- cmd = [sys.executable, args.training_script]\n+ cmd = []\n+ if args.no_python and args.module:\n+ raise ValueError(\"--module and --no_python cannot be used together\")\n+ if not args.no_python:\n+ cmd.append(sys.executable)\n+ if args.module:\n+ cmd.append(\"-m\")", "from_author": false }, { "body": "```suggestion\r\n elif args.module:\r\n cmd.append(\"--module\")\r\n elif args.no_python:\r\n```", "diff_hunk": "@@ -184,6 +201,13 @@ def multi_gpu_launcher(args):\n cmd.extend([\"--nproc_per_node\", str(args.num_processes)])\n if args.main_process_port is not None:\n cmd.extend([\"--master_port\", str(args.main_process_port)])\n+\n+ if args.module and args.no_python:\n+ raise ValueError(\"--module and --no_python cannot be used together\")\n+ if args.module:\n+ cmd.append(\"--module\")\n+ if args.no_python:", "from_author": false }, { "body": "```suggestion\r\n if args.module and args.no_python:\r\n raise ValueError(\"--module and --no_python cannot be used together\")\r\n elif args.module:\r\n cmd.append(\"--module\")\r\n elif args.no_python:\r\n```", "diff_hunk": "@@ -226,6 +249,12 @@ def deepspeed_launcher(args):\n else:\n cmd.extend([\"--num_gpus\", str(args.num_processes)])\n \n+ if args.module and args.no_python:\n+ raise ValueError(\"--module and --no_python cannot be used together\")\n+ if args.module:\n+ cmd.append(\"--module\")\n+ if args.no_python:", "from_author": false }, { "body": "Good catch, thanks. 
I think after the un-indent, it still has to stay an `if` and not an `elif`, otherwise it won't get evaluated because the `if not args.no_python` above would always be evaluated `True` first.\r\n\r\n```suggestion\r\n if args.module:\r\n cmd.append(\"-m\")\r\n```", "diff_hunk": "@@ -140,7 +151,14 @@ def launch_command_parser(subparsers=None):\n \n \n def simple_launcher(args):\n- cmd = [sys.executable, args.training_script]\n+ cmd = []\n+ if args.no_python and args.module:\n+ raise ValueError(\"--module and --no_python cannot be used together\")\n+ if not args.no_python:\n+ cmd.append(sys.executable)\n+ if args.module:\n+ cmd.append(\"-m\")", "from_author": true } ], "context": [ { "body": "Thanks again for working on this!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/258", "pr_id": 861047307 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex b841bc809..1254c19e9 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -22,6 +22,7 @@\n \n from packaging import version\n \n+from .checkpointing import load_accelerator_state, save_accelerator_state\n from .data_loader import prepare_data_loader\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler\n from .optimizer import AcceleratedOptimizer\n@@ -40,6 +41,7 @@\n \n if is_deepspeed_available():\n import deepspeed\n+\n from .deepspeed_utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper\n \n import logging\n@@ -560,6 +562,36 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scaler, and RNG generators.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+ weights = [self.get_state_dict(m) for m in self._models]\n+ return save_accelerator_state(output_dir, weights, self._optimizers, self.state.process_index, self.scaler)\n+\n+ def load_state(self, input_dir: str):\n+ \"\"\"\n+ Loads the current states of the model, optimizer, scaler, and RNG generators.\n+\n+ Args:\n+ input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder all relevant weights and states were saved in.\n+ \"\"\"\n+ # Check if folder exists\n+ input_dir = os.path.expanduser(input_dir)\n+ if not os.path.isdir(input_dir):\n+ raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n+ logger.info(f\"Loading states from {input_dir}\")\n+ load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)\n+\n def free_memory(self):\n \"\"\"\n Will release all references to the internal objects stored and call the garbage collector. You should call this\ndiff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py\nnew file mode 100644\nindex 000000000..37d68b90c\n--- /dev/null\n+++ b/src/accelerate/checkpointing.py\n@@ -0,0 +1,134 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ model_states (:obj:`List[torch.nn.Module]`):\n+ A list of model states\n+ optimizers (:obj:`List[torch.optim.Optimizer]`):\n+ A list of optimizer instances\n+ process_index (:obj:`int`):\n+ The current process index in the Accelerator state\n+ scaler (:obj:`torch.cuda.amp.GradScaler`, `optional`):\n+ An optional gradient scaler instance to save\n+ \"\"\"\n+ # Model states\n+ for i, state in enumerate(model_states):\n+ weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = f\"{OPTIMIZER_NAME}.bin\" if i == 0 else f\"{OPTIMIZER_NAME}_{i}.bin\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScaler state\n+ if scaler is not None:\n+ state = scaler.state_dict()\n+ output_scaler_file = os.path.join(output_dir, SCALER_NAME)\n+ torch.save(state, output_scaler_file)\n+ logger.info(f\"Gradient scaler state saved in {output_scaler_file}\")\n+ # Random number generator states\n+ states = {}\n+ states_name = f\"{RNG_STATE_NAME}_{process_index}.pkl\"\n+ states[\"random_state\"] = random.getstate()\n+ states[\"numpy_random_seed\"] = np.random.get_state()\n+ states[\"torch_manual_seed\"] = torch.get_rng_state()\n+ states[\"torch_cuda_manual_seed\"] = torch.cuda.get_rng_state_all()\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ states[\"xm_seed\"] = torch.tensor(xm.get_rng_state())\n+ output_states_file = os.path.join(output_dir, states_name)\n+ torch.save(states, output_states_file)\n+ logger.info(f\"Random states saved in {output_states_file}\")\n+ return output_dir\n+\n+\n+def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=None):\n+ \"\"\"\n+ Loads states of the models, optimizers, scaler, and RNG generators from a given directory.\n+\n+ Args:\n+ 
input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to load all relevant weights and states.\n+ model_stmodelsates (:obj:`List[torch.nn.Module]`):\n+ A list of model instances\n+ optimizers (:obj:`List[torch.optim.Optimizer]`):\n+ A list of optimizer instances\n+ process_index (:obj:`int`):\n+ The current process index in the Accelerator state\n+ scaler (:obj:`torch.cuda.amp.GradScaler`, `optional`):\n+ An optional `GradScaler` instance to load\n+ \"\"\"\n+ # Model states\n+ for i, model in enumerate(models):\n+ weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n+ input_model_file = os.path.join(input_dir, weights_name)\n+ models[i].load_state_dict(torch.load(input_model_file))\n+ logger.info(\"All model weights loaded successfully\")\n+\n+ # Optimizer states\n+ for i, opt in enumerate(optimizers):\n+ optimizer_name = f\"{OPTIMIZER_NAME}.bin\" if i == 0 else f\"{OPTIMIZER_NAME}_{i}.bin\"\n+ input_optimizer_file = os.path.join(input_dir, optimizer_name)\n+ optimizers[i].load_state_dict(torch.load(input_optimizer_file))\n+ logger.info(\"All optimizer states loaded successfully\")\n+\n+ # GradScaler state\n+ if scaler is not None:\n+ input_scaler_file = os.path.join(input_dir, SCALER_NAME)\n+ scaler.load_state_dict(torch.load(input_scaler_file))\n+ logger.info(\"GradScaler state loaded successfully\")\n+\n+ # Random states\n+ states = torch.load(os.path.join(input_dir, f\"{RNG_STATE_NAME}_{process_index}.pkl\"))\n+ random.setstate(states[\"random_state\"])\n+ np.random.set_state(states[\"numpy_random_seed\"])\n+ torch.set_rng_state(states[\"torch_manual_seed\"])\n+ torch.cuda.set_rng_state_all(states[\"torch_cuda_manual_seed\"])\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ xm.set_rng_state(states[\"xm_seed\"])\n+ logger.info(\"All random states loaded successfully\")\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 3af792f1f..abd5152ae 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -43,6 +43,11 @@ def is_sagemaker_available():\n if is_deepspeed_available():\n from deepspeed import DeepSpeedEngine\n \n+SCALER_NAME = \"scaler.pt\"\n+MODEL_NAME = \"pytorch_model\"\n+RNG_STATE_NAME = \"random_states\"\n+OPTIMIZER_NAME = \"optimizer\"\n+\n \n class RNGType(Enum):\n TORCH = \"torch\"\ndiff --git a/tests/test_state_checkpointing.py b/tests/test_state_checkpointing.py\nnew file mode 100644\nindex 000000000..a74dcb724\n--- /dev/null\n+++ b/tests/test_state_checkpointing.py\n@@ -0,0 +1,125 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import logging\n+import os\n+import random\n+import tempfile\n+import unittest\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+def train(num_epochs, model, dataloader, optimizer, accelerator):\n+ \"Trains for `num_epochs`\"\n+ rands = []\n+ for epoch in range(num_epochs):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+ rands.append(random.random()) # Introduce some randomness\n+ return rands\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+class CheckpointTest(unittest.TestCase):\n+ def test_can_resume_training(self):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator()\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ (a, b) = model.a.item(), model.b.item()\n+ opt_state = optimizer.state_dict()\n+ ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)\n+ (a1, b1) = model.a.item(), model.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ accelerator = Accelerator()\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ 
accelerator.load_state(initial)\n+ (a2, b2) = model.a.item(), model.b.item()\n+ opt_state2 = optimizer.state_dict()\n+ self.assertEqual(a, a2)\n+ self.assertEqual(b, b2)\n+ self.assertEqual(opt_state, opt_state2)\n+\n+ test_rands = train(2, model, train_dataloader, optimizer, accelerator)\n+ # Save everything\n+ checkpoint = os.path.join(tmpdir, \"checkpoint\")\n+ accelerator.save_state(checkpoint)\n+\n+ # Load everything back in and make sure all states work\n+ accelerator.load_state(checkpoint)\n+ test_rands += train(1, model, train_dataloader, optimizer, accelerator)\n+ (a3, b3) = model.a.item(), model.b.item()\n+ opt_state3 = optimizer.state_dict()\n+ self.assertEqual(a1, a3)\n+ self.assertEqual(b1, b3)\n+ self.assertEqual(opt_state1, opt_state3)\n+ self.assertEqual(ground_truth_rands, test_rands)\n", "code_comments": [ { "body": "As much as possible, I would prefer to have everything in another module and have `accelerator` be a thin wrapper. I think we could have a new `checkpointing.py` file where a function that does all of this (taking `models`, `optimizers`, `scaler`). Same for the loading.", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):", "from_author": false }, { "body": "```suggestion\r\n if i != 0:\r\n weights_name += f\"_{i}\"\r\n```\r\nIs it a typo? I think it's for every model **but** the first one.", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"", "from_author": false }, { "body": "Should be `accelerator.save` here (otherwise won't work for TPUs).", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)", "from_author": false }, { "body": "Same here:\r\n```suggestion\r\n accelerator.save(state, output_optimizer_file)\r\n```", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ 
The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)", "from_author": false }, { "body": "```suggestion\r\n if i != 0:\r\n optimizer_name += f\"_{i}\"\r\n```\r\nAs above!", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"", "from_author": false }, { "body": "```suggestion\r\n scaler_name = \"scaler.pt\"\r\n```\r\nLet's use the same name as in the `Trainer`.", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ 
logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"", "from_author": false }, { "body": "```suggestion\r\n logger.info(f\"Gradient scaler state saved in {output_scaler_file}\")\r\n```", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"\n+ output_scalar_file = os.path.join(output_dir, scalar_name)\n+ torch.save(state, output_scalar_file)\n+ logger.info(f\"GradScalar state saved in {output_scalar_file}\")", "from_author": false }, { "body": "```suggestion\r\n output_scaler_file = os.path.join(output_dir, scaler_name)\r\n torch.save(state, output_scaler_file)\r\n```", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"\n+ output_scalar_file = 
os.path.join(output_dir, scalar_name)\n+ torch.save(state, output_scalar_file)", "from_author": false }, { "body": "I realize we are missing the learning rate scheduler -> we should thus add it as an optional argument to the `save_state` function, or add a `register_for_checkpoints` method to the Accelerator object that would take objects not passed to the `prepare` method but the user still wants to save.", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state", "from_author": false }, { "body": "```suggestion\r\n # Random number generator states\r\n```", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"\n+ output_scalar_file = os.path.join(output_dir, scalar_name)\n+ torch.save(state, output_scalar_file)\n+ logger.info(f\"GradScalar state saved in {output_scalar_file}\")\n+ # Random states", "from_author": false }, { "body": "We use `rng_state.pth` in the Trainer, so we 
should do so here as well for consistency across libs.\r\n\r\nAlso, we need to save the states on each process as they might be different (in `rng_state_{i}.pth` for process i)", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"\n+ output_scalar_file = os.path.join(output_dir, scalar_name)\n+ torch.save(state, output_scalar_file)\n+ logger.info(f\"GradScalar state saved in {output_scalar_file}\")\n+ # Random states\n+ states = {}\n+ states_name = \"random_states.pkl\"", "from_author": false }, { "body": "```suggestion\r\n if i != 0:\r\n weights_name += f\"_{i}\"\r\n```", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"\n+ output_scalar_file = os.path.join(output_dir, scalar_name)\n+ torch.save(state, output_scalar_file)\n+ logger.info(f\"GradScalar state saved in 
{output_scalar_file}\")\n+ # Random states\n+ states = {}\n+ states_name = \"random_states.pkl\"\n+ states[\"random_state\"] = random.getstate()\n+ states[\"numpy_random_seed\"] = np.random.get_state()\n+ states[\"torch_manual_seed\"] = torch.get_rng_state()\n+ states[\"torch_cuda_manual_seed\"] = torch.cuda.get_rng_state_all()\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ states[\"xm_seed\"] = torch.tensor(xm.get_rng_state())\n+ output_states_file = os.path.join(output_dir, states_name)\n+ torch.save(states, output_states_file)\n+ logger.info(f\"Random states saved in {output_states_file}\")\n+\n+ def load_state(self, input_dir: str):\n+ \"\"\"\n+ Loads the current states of the model, optimizer, scalar, and RNG generators from `folder_name`.\n+\n+ Args:\n+ input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder all relevant weights and states were saved in.\n+ \"\"\"\n+ # Check if folder exists\n+ input_dir = os.path.expanduser(input_dir)\n+ if not os.path.isdir(input_dir):\n+ raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n+ logger.info(f\"Loading states from {input_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"", "from_author": false }, { "body": "```suggestion\r\n if i != 0:\r\n optimizer_name += f\"_{i}\"\r\n```", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"\n+ output_scalar_file = os.path.join(output_dir, scalar_name)\n+ torch.save(state, output_scalar_file)\n+ logger.info(f\"GradScalar state saved in {output_scalar_file}\")\n+ # Random states\n+ states = {}\n+ states_name = \"random_states.pkl\"\n+ states[\"random_state\"] = random.getstate()\n+ states[\"numpy_random_seed\"] = np.random.get_state()\n+ states[\"torch_manual_seed\"] = torch.get_rng_state()\n+ states[\"torch_cuda_manual_seed\"] = torch.cuda.get_rng_state_all()\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ states[\"xm_seed\"] = torch.tensor(xm.get_rng_state())\n+ output_states_file = os.path.join(output_dir, 
states_name)\n+ torch.save(states, output_states_file)\n+ logger.info(f\"Random states saved in {output_states_file}\")\n+\n+ def load_state(self, input_dir: str):\n+ \"\"\"\n+ Loads the current states of the model, optimizer, scalar, and RNG generators from `folder_name`.\n+\n+ Args:\n+ input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder all relevant weights and states were saved in.\n+ \"\"\"\n+ # Check if folder exists\n+ input_dir = os.path.expanduser(input_dir)\n+ if not os.path.isdir(input_dir):\n+ raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n+ logger.info(f\"Loading states from {input_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ input_model_file = os.path.join(input_dir, weights_name)\n+ self._models[i].load_state_dict(torch.load(input_model_file))\n+ logger.info(\"All model weights loaded successfully\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"", "from_author": false }, { "body": "The names should probably be constants, so we only have to change them in one place ;-)", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"\n+ output_scalar_file = os.path.join(output_dir, scalar_name)\n+ torch.save(state, output_scalar_file)\n+ logger.info(f\"GradScalar state saved in {output_scalar_file}\")\n+ # Random states\n+ states = {}\n+ states_name = \"random_states.pkl\"\n+ states[\"random_state\"] = random.getstate()\n+ states[\"numpy_random_seed\"] = np.random.get_state()\n+ states[\"torch_manual_seed\"] = torch.get_rng_state()\n+ states[\"torch_cuda_manual_seed\"] = torch.cuda.get_rng_state_all()\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ states[\"xm_seed\"] = torch.tensor(xm.get_rng_state())\n+ output_states_file = os.path.join(output_dir, states_name)\n+ torch.save(states, output_states_file)\n+ logger.info(f\"Random states saved in {output_states_file}\")\n+\n+ def load_state(self, input_dir: 
str):\n+ \"\"\"\n+ Loads the current states of the model, optimizer, scalar, and RNG generators from `folder_name`.\n+\n+ Args:\n+ input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder all relevant weights and states were saved in.\n+ \"\"\"\n+ # Check if folder exists\n+ input_dir = os.path.expanduser(input_dir)\n+ if not os.path.isdir(input_dir):\n+ raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n+ logger.info(f\"Loading states from {input_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ input_model_file = os.path.join(input_dir, weights_name)\n+ self._models[i].load_state_dict(torch.load(input_model_file))\n+ logger.info(\"All model weights loaded successfully\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ input_optimizer_file = os.path.join(input_dir, optimizer_name)\n+ self._optimizers[i].load_state_dict(torch.load(input_optimizer_file))\n+ logger.info(\"All optimizer states loaded successfully\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ scaler_name = \"scaler.bin\"", "from_author": false }, { "body": "Same ehre + one for each process.", "diff_hunk": "@@ -560,6 +566,109 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scalar, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ state = self.get_state_dict(model)\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ torch.save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ state = self.scaler.state_dict()\n+ scalar_name = \"scaler.bin\"\n+ output_scalar_file = os.path.join(output_dir, scalar_name)\n+ torch.save(state, output_scalar_file)\n+ logger.info(f\"GradScalar state saved in {output_scalar_file}\")\n+ # Random states\n+ states = {}\n+ states_name = \"random_states.pkl\"\n+ states[\"random_state\"] = random.getstate()\n+ states[\"numpy_random_seed\"] = np.random.get_state()\n+ states[\"torch_manual_seed\"] = torch.get_rng_state()\n+ states[\"torch_cuda_manual_seed\"] = torch.cuda.get_rng_state_all()\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ states[\"xm_seed\"] = torch.tensor(xm.get_rng_state())\n+ output_states_file = os.path.join(output_dir, states_name)\n+ torch.save(states, 
output_states_file)\n+ logger.info(f\"Random states saved in {output_states_file}\")\n+\n+ def load_state(self, input_dir: str):\n+ \"\"\"\n+ Loads the current states of the model, optimizer, scalar, and RNG generators from `folder_name`.\n+\n+ Args:\n+ input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder all relevant weights and states were saved in.\n+ \"\"\"\n+ # Check if folder exists\n+ input_dir = os.path.expanduser(input_dir)\n+ if not os.path.isdir(input_dir):\n+ raise ValueError(f\"Tried to find {input_dir} but folder does not exist\")\n+ logger.info(f\"Loading states from {input_dir}\")\n+\n+ # Model states\n+ for i, model in enumerate(self._models):\n+ weights_name = \"pytorch_model\"\n+ if i == 0:\n+ weights_name += f\"_{i}\"\n+ weights_name += \".bin\"\n+ input_model_file = os.path.join(input_dir, weights_name)\n+ self._models[i].load_state_dict(torch.load(input_model_file))\n+ logger.info(\"All model weights loaded successfully\")\n+ # Optimizer states\n+ for i, opt in enumerate(self._optimizers):\n+ optimizer_name = \"optimizer\"\n+ if i == 0:\n+ optimizer_name += f\"_{i}\"\n+ optimizer_name += \".pt\"\n+ input_optimizer_file = os.path.join(input_dir, optimizer_name)\n+ self._optimizers[i].load_state_dict(torch.load(input_optimizer_file))\n+ logger.info(\"All optimizer states loaded successfully\")\n+ # GradScalar state\n+ if self.scaler is not None:\n+ scaler_name = \"scaler.bin\"\n+ input_scaler_file = os.path.join(input_dir, scaler_name)\n+ self.scaler.load_state_dict(torch.load(input_scaler_file))\n+ logger.info(\"GradScalar state loaded successfully\")\n+ # Random states\n+ states = torch.load(os.path.join(input_dir, \"random_states.pkl\"))", "from_author": false }, { "body": "We could wrap all of this in some train function that does the training.", "diff_hunk": "@@ -0,0 +1,148 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import tempfile\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+def test_can_resume_training(args):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator(\n+ fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, device_placement=True\n+ )\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a, b) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state = optimizer.state_dict()\n+ for epoch in range(3):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a1, b1) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially", "from_author": false }, { "body": "Nit: we should use `unittest` here with `self.assertEqual` and `self.assertEqualDict` instead of those asserts.", "diff_hunk": "@@ -0,0 +1,148 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import tempfile\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+def test_can_resume_training(args):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator(\n+ fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, device_placement=True\n+ )\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a, b) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state = optimizer.state_dict()\n+ for epoch in range(3):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a1, b1) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially\n+ accelerator.load_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a2, b2) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state2 = optimizer.state_dict()\n+ assert a == a2\n+ assert b == b2\n+ assert opt_state == opt_state2", "from_author": false }, { "body": "Same as above.", "diff_hunk": "@@ -0,0 +1,148 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import tempfile\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+def test_can_resume_training(args):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator(\n+ fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, device_placement=True\n+ )\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a, b) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state = optimizer.state_dict()\n+ for epoch in range(3):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a1, b1) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially\n+ accelerator.load_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a2, b2) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state2 = optimizer.state_dict()\n+ assert a == a2\n+ assert b == b2\n+ assert opt_state == opt_state2\n+\n+ for epoch in range(2):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ 
optimizer.zero_grad()\n+ # Save everything\n+ checkpoint = os.path.join(tmpdir, \"checkpoint\")\n+ accelerator.save_state(checkpoint)\n+\n+ # Load everything back in and make sure all states work\n+ accelerator.load_state(checkpoint)\n+ for epoch in range(1):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a3, b3) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state3 = optimizer.state_dict()\n+ assert a1 == a3\n+ assert b1 == b3\n+ assert opt_state1 == opt_state3", "from_author": false }, { "body": "This one being deprecated, I wouldn't pass it/create it.", "diff_hunk": "@@ -0,0 +1,148 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import tempfile\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+def test_can_resume_training(args):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator(\n+ fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, device_placement=True\n+ )\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a, b) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state = optimizer.state_dict()\n+ for epoch in range(3):\n+ # Train quickly\n+ model.train()\n+ for step, batch in 
enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a1, b1) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially\n+ accelerator.load_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a2, b2) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state2 = optimizer.state_dict()\n+ assert a == a2\n+ assert b == b2\n+ assert opt_state == opt_state2\n+\n+ for epoch in range(2):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+ # Save everything\n+ checkpoint = os.path.join(tmpdir, \"checkpoint\")\n+ accelerator.save_state(checkpoint)\n+\n+ # Load everything back in and make sure all states work\n+ accelerator.load_state(checkpoint)\n+ for epoch in range(1):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a3, b3) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state3 = optimizer.state_dict()\n+ assert a1 == a3\n+ assert b1 == b3\n+ assert opt_state1 == opt_state3\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")", "from_author": false }, { "body": "I'd like some advice on how to convert my script -> a proper test. I tried looking in `test_multigpu` but still doesn't quite line up with how the testing workflow operates", "diff_hunk": "@@ -0,0 +1,148 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import tempfile\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+def test_can_resume_training(args):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator(\n+ fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision, device_placement=True\n+ )\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a, b) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state = optimizer.state_dict()\n+ for epoch in range(3):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a1, b1) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially\n+ accelerator.load_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a2, b2) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state2 = optimizer.state_dict()\n+ assert a == a2\n+ assert b == b2\n+ assert opt_state == opt_state2\n+\n+ for epoch in range(2):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ 
optimizer.zero_grad()\n+ # Save everything\n+ checkpoint = os.path.join(tmpdir, \"checkpoint\")\n+ accelerator.save_state(checkpoint)\n+\n+ # Load everything back in and make sure all states work\n+ accelerator.load_state(checkpoint)\n+ for epoch in range(1):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a3, b3) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state3 = optimizer.state_dict()\n+ assert a1 == a3\n+ assert b1 == b3\n+ assert opt_state1 == opt_state3\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")", "from_author": true }, { "body": "```suggestion\r\n Saves the current states of the model, optimizer, scaler, and RNG generators.\r\n```\r\nIt's actually output_dir but since we document it just after, no need to mention it up there I believe.", "diff_hunk": "@@ -560,6 +562,36 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scaler, and RNG generators to `folder_name`.", "from_author": false }, { "body": "```suggestion\r\n Loads the current states of the model, optimizer, scaler, and RNG generators.\r\n```", "diff_hunk": "@@ -560,6 +562,36 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def save_state(self, output_dir: str):\n+ \"\"\"\n+ Saves the current states of the model, optimizer, scaler, and RNG generators to `folder_name`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ \"\"\"\n+ # Check if folder exists\n+ output_dir = os.path.expanduser(output_dir)\n+ os.makedirs(output_dir, exist_ok=True)\n+ logger.info(f\"Saving current state to {output_dir}\")\n+ weights = [self.get_state_dict(m) for m in self._models]\n+ return save_accelerator_state(output_dir, weights, self._optimizers, self.state.process_index, self.scaler)\n+\n+ def load_state(self, input_dir: str):\n+ \"\"\"\n+ Loads the current states of the model, optimizer, scaler, and RNG generators from `folder_name`.", "from_author": false }, { "body": "```suggestion\r\n# Copyright 2022 The HuggingFace Team. All rights reserved.\r\n```", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.", "from_author": false }, { "body": "```suggestion\r\n Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.\r\n```\r\nLet's leave the argument names for the arg descriptions and be moe \"English\" in the description (or if you want them, you need to put them in double backticks or add an `:obj:` to have them code-formatted)", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the `models`, `optimizers`, scaler, and RNG generators to `output_dir`.", "from_author": false }, { "body": "```suggestion\r\n scaler (:obj:`torch.cuda.amp.GradScaler`, `optional`):\r\n```", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the `models`, `optimizers`, scaler, and RNG generators to `output_dir`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ model_states (:obj:`list`):\n+ A list of model states\n+ optimizers (:obj:`list`):\n+ A list of optimizer instances\n+ process_index (:obj:`int`):\n+ The current process index in the Accelerator state\n+ scaler (:obj:`GradScaler`), Optional:", "from_author": false }, { "body": "```suggestion\r\n An optional gradient scaler instance to save\r\n```\r\n(The type is already described)", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the `models`, `optimizers`, scaler, and RNG generators to `output_dir`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ model_states (:obj:`list`):\n+ A list of model states\n+ optimizers (:obj:`list`):\n+ A list of optimizer instances\n+ process_index (:obj:`int`):\n+ The current process index in the Accelerator state\n+ scaler (:obj:`GradScaler`), Optional:\n+ An optional `GradScaler` instance to save", "from_author": false }, { "body": "This also needs to use the `save` function (especially for TPUs).", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the `models`, `optimizers`, scaler, and RNG generators to `output_dir`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ model_states (:obj:`list`):\n+ A list of model states\n+ optimizers (:obj:`list`):\n+ A list of optimizer instances\n+ process_index (:obj:`int`):\n+ The current process index in the Accelerator state\n+ scaler (:obj:`GradScaler`), Optional:\n+ An optional `GradScaler` instance to save\n+ \"\"\"\n+ # Model states\n+ for i, state in enumerate(model_states):\n+ weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = f\"{OPTIMIZER_NAME}.bin\" if i == 0 else f\"{OPTIMIZER_NAME}_{i}.bin\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)", "from_author": false }, { "body": "```suggestion\r\n model_states (:obj:`List[torch.nn.Module]`):\r\n```", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the `models`, `optimizers`, scaler, and RNG generators to `output_dir`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ model_states (:obj:`list`):", "from_author": false }, { "body": "```suggestion\r\n optimizers (:obj:`List[torch.optim.Optimizer]`):\r\n```", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the `models`, `optimizers`, scaler, and RNG generators to `output_dir`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ model_states (:obj:`list`):\n+ A list of model states\n+ optimizers (:obj:`list`):", "from_author": false }, { "body": "Fix those as above.", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the `models`, `optimizers`, scaler, and RNG generators to `output_dir`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ model_states (:obj:`list`):\n+ A list of model states\n+ optimizers (:obj:`list`):\n+ A list of optimizer instances\n+ process_index (:obj:`int`):\n+ The current process index in the Accelerator state\n+ scaler (:obj:`GradScaler`), Optional:\n+ An optional `GradScaler` instance to save\n+ \"\"\"\n+ # Model states\n+ for i, state in enumerate(model_states):\n+ weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = f\"{OPTIMIZER_NAME}.bin\" if i == 0 else f\"{OPTIMIZER_NAME}_{i}.bin\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScaler state\n+ if scaler is not None:\n+ state = scaler.state_dict()\n+ output_scaler_file = os.path.join(output_dir, SCALER_NAME)\n+ torch.save(state, output_scaler_file)\n+ logger.info(f\"Gradient scaler state saved in {output_scaler_file}\")\n+ # Random number generator states\n+ states = {}\n+ states_name = f\"{RNG_STATE_NAME}_{process_index}.pkl\"\n+ states[\"random_state\"] = random.getstate()\n+ states[\"numpy_random_seed\"] = np.random.get_state()\n+ states[\"torch_manual_seed\"] = torch.get_rng_state()\n+ states[\"torch_cuda_manual_seed\"] = torch.cuda.get_rng_state()\n+ # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ states[\"xm_seed\"] = torch.tensor(xm.get_rng_state())\n+ output_states_file = os.path.join(output_dir, states_name)\n+ torch.save(states, output_states_file)\n+ logger.info(f\"Random states saved in {output_states_file}\")\n+ return output_dir\n+\n+\n+def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=None):\n+ \"\"\"\n+ Loads states of the `models`, `optimizers`, scaler, and RNG generators from `input_dir`.\n+\n+ Args:\n+ input_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of 
the folder to load all relevant weights and states.\n+ model_stmodelsates (:obj:`list`):\n+ A list of model instances\n+ optimizers (:obj:`list`):\n+ A list of optimizer instances\n+ process_index (:obj:`int`):\n+ The current process index in the Accelerator state\n+ scaler (:obj:`GradScaler`), Optional:", "from_author": false }, { "body": "```suggestion\r\n Loads states of the models, optimizers, scaler, and RNG generators from a given directory.\r\n```", "diff_hunk": "@@ -0,0 +1,134 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import random\n+from typing import List\n+\n+import numpy as np\n+import torch\n+from torch.cuda.amp import GradScaler\n+\n+from .state import is_tpu_available\n+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save\n+\n+\n+if is_tpu_available():\n+ import torch_xla.core.xla_model as xm\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n+\n+\n+def save_accelerator_state(\n+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None\n+):\n+ \"\"\"\n+ Saves the current states of the `models`, `optimizers`, scaler, and RNG generators to `output_dir`.\n+\n+ Args:\n+ output_dir (:obj:`str` or :obj:`os.PathLike`):\n+ The name of the folder to save all relevant weights and states.\n+ model_states (:obj:`list`):\n+ A list of model states\n+ optimizers (:obj:`list`):\n+ A list of optimizer instances\n+ process_index (:obj:`int`):\n+ The current process index in the Accelerator state\n+ scaler (:obj:`GradScaler`), Optional:\n+ An optional `GradScaler` instance to save\n+ \"\"\"\n+ # Model states\n+ for i, state in enumerate(model_states):\n+ weights_name = f\"{MODEL_NAME}.bin\" if i == 0 else f\"{MODEL_NAME}_{i}.bin\"\n+ output_model_file = os.path.join(output_dir, weights_name)\n+ save(state, output_model_file)\n+ logger.info(f\"Model weights saved in {output_model_file}\")\n+ # Optimizer states\n+ for i, opt in enumerate(optimizers):\n+ state = opt.state_dict()\n+ optimizer_name = f\"{OPTIMIZER_NAME}.bin\" if i == 0 else f\"{OPTIMIZER_NAME}_{i}.bin\"\n+ output_optimizer_file = os.path.join(output_dir, optimizer_name)\n+ torch.save(state, output_optimizer_file)\n+ logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n+ # GradScaler state\n+ if scaler is not None:\n+ state = scaler.state_dict()\n+ output_scaler_file = os.path.join(output_dir, SCALER_NAME)\n+ torch.save(state, output_scaler_file)\n+ logger.info(f\"Gradient scaler state saved in {output_scaler_file}\")\n+ # Random number generator states\n+ states = {}\n+ states_name = f\"{RNG_STATE_NAME}_{process_index}.pkl\"\n+ states[\"random_state\"] = random.getstate()\n+ states[\"numpy_random_seed\"] = np.random.get_state()\n+ states[\"torch_manual_seed\"] = torch.get_rng_state()\n+ states[\"torch_cuda_manual_seed\"] = torch.cuda.get_rng_state()\n+ # ^^ safe to call this function even if cuda is not available\n+ if 
is_tpu_available():\n+ states[\"xm_seed\"] = torch.tensor(xm.get_rng_state())\n+ output_states_file = os.path.join(output_dir, states_name)\n+ torch.save(states, output_states_file)\n+ logger.info(f\"Random states saved in {output_states_file}\")\n+ return output_dir\n+\n+\n+def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=None):\n+ \"\"\"\n+ Loads states of the `models`, `optimizers`, scaler, and RNG generators from `input_dir`.", "from_author": false }, { "body": "gt stands for? An expanded name would be easier to read :-)\r\nSince we have `test` below it's probably ground truth, so `ground_truth_rands` or if you wnat shorter `expected_rands`?", "diff_hunk": "@@ -0,0 +1,144 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import random\n+import tempfile\n+import unittest\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+def train(num_epochs, model, dataloader, optimizer, accelerator):\n+ \"Trains for `num_epochs`\"\n+ rands = []\n+ for epoch in range(num_epochs):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+ rands.append(random.random()) # Introduce some randomness\n+ return rands\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+class CheckpointTest(unittest.TestCase):\n+ def test_can_resume_training(self, args):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, device_placement=True)\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, 
optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a, b) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state = optimizer.state_dict()\n+\n+ gt_rands = train(3, model, train_dataloader, optimizer, accelerator)", "from_author": false }, { "body": "No need to make this an executable, pytest will do that on its own. I think we can remove the `args` from the `test_can_resume_training` method.", "diff_hunk": "@@ -0,0 +1,144 @@\n+# Copyright 2022 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import random\n+import tempfile\n+import unittest\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+def train(num_epochs, model, dataloader, optimizer, accelerator):\n+ \"Trains for `num_epochs`\"\n+ rands = []\n+ for epoch in range(num_epochs):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+ rands.append(random.random()) # Introduce some randomness\n+ return rands\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+class CheckpointTest(unittest.TestCase):\n+ def test_can_resume_training(self, args):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, device_placement=True)\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ 
accelerator.save_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a, b) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state = optimizer.state_dict()\n+\n+ gt_rands = train(3, model, train_dataloader, optimizer, accelerator)\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a1, b1) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially\n+ accelerator.load_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a2, b2) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state2 = optimizer.state_dict()\n+ self.assertEqual(a, a2)\n+ self.assertEqual(b, b2)\n+ self.assertEqual(opt_state, opt_state2)\n+ set_seed(42)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Ensure all numbers align\n+\n+ test_rands = train(2, model, train_dataloader, optimizer, accelerator)\n+ # Save everything\n+ checkpoint = os.path.join(tmpdir, \"checkpoint\")\n+ accelerator.save_state(checkpoint)\n+\n+ # Load everything back in and make sure all states work\n+ accelerator.load_state(checkpoint)\n+ test_rands += train(1, model, train_dataloader, optimizer, accelerator)\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a3, b3) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state3 = optimizer.state_dict()\n+ self.assertEqual(a1, a3)\n+ self.assertEqual(b1, b3)\n+ self.assertEqual(opt_state1, opt_state3)\n+ self.assertEqual(gt_rands, test_rands)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ args = parser.parse_args()\n+ CheckpointTest().test_can_resume_training(args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()", "from_author": false }, { "body": "@sgugger how would we delegate the tests to be ran on cpu, mixed precision, etc then in that case? ", "diff_hunk": "@@ -0,0 +1,144 @@\n+# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+import random\n+import tempfile\n+import unittest\n+\n+import torch\n+from torch import nn\n+from torch.utils.data import DataLoader, TensorDataset\n+\n+from accelerate import Accelerator\n+from accelerate.utils import set_seed\n+\n+\n+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):\n+ \"Generates a tuple of dummy DataLoaders to test with\"\n+\n+ def get_dataset(n_batches):\n+ x = torch.randn(batch_size * n_batches, 1)\n+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))\n+\n+ train_dataset = get_dataset(n_train_batches)\n+ valid_dataset = get_dataset(n_valid_batches)\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+ return (train_dataloader, valid_dataloader)\n+\n+\n+def train(num_epochs, model, dataloader, optimizer, accelerator):\n+ \"Trains for `num_epochs`\"\n+ rands = []\n+ for epoch in range(num_epochs):\n+ # Train quickly\n+ model.train()\n+ for step, batch in enumerate(dataloader):\n+ x, y = batch\n+ outputs = model(x)\n+ loss = torch.nn.functional.mse_loss(outputs, y)\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ optimizer.zero_grad()\n+ rands.append(random.random()) # Introduce some randomness\n+ return rands\n+\n+\n+class DummyModel(nn.Module):\n+ \"Simple model to do y=mx+b\"\n+\n+ def __init__(self):\n+ super().__init__()\n+ self.a = nn.Parameter(torch.randn(1))\n+ self.b = nn.Parameter(torch.randn(1))\n+\n+ def forward(self, x):\n+ return x * self.a + self.b\n+\n+\n+class CheckpointTest(unittest.TestCase):\n+ def test_can_resume_training(self, args):\n+ with tempfile.TemporaryDirectory() as tmpdir:\n+ set_seed(42)\n+ model = DummyModel()\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Train baseline\n+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, device_placement=True)\n+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, valid_dataloader\n+ )\n+ # Save initial\n+ initial = os.path.join(tmpdir, \"initial\")\n+ accelerator.save_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a, b) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state = optimizer.state_dict()\n+\n+ gt_rands = train(3, model, train_dataloader, optimizer, accelerator)\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a1, b1) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state1 = optimizer.state_dict()\n+\n+ # Train partially\n+ accelerator.load_state(initial)\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a2, b2) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state2 = optimizer.state_dict()\n+ 
self.assertEqual(a, a2)\n+ self.assertEqual(b, b2)\n+ self.assertEqual(opt_state, opt_state2)\n+ set_seed(42)\n+ train_dataloader, valid_dataloader = dummy_dataloaders()\n+ # Ensure all numbers align\n+\n+ test_rands = train(2, model, train_dataloader, optimizer, accelerator)\n+ # Save everything\n+ checkpoint = os.path.join(tmpdir, \"checkpoint\")\n+ accelerator.save_state(checkpoint)\n+\n+ # Load everything back in and make sure all states work\n+ accelerator.load_state(checkpoint)\n+ test_rands += train(1, model, train_dataloader, optimizer, accelerator)\n+\n+ model_unwrapped = accelerator.unwrap_model(model)\n+ (a3, b3) = model_unwrapped.a.item(), model_unwrapped.b.item()\n+ opt_state3 = optimizer.state_dict()\n+ self.assertEqual(a1, a3)\n+ self.assertEqual(b1, b3)\n+ self.assertEqual(opt_state1, opt_state3)\n+ self.assertEqual(gt_rands, test_rands)\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ args = parser.parse_args()\n+ CheckpointTest().test_can_resume_training(args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()", "from_author": true } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/255", "pr_id": 855956281 }, { "diff": "diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md\nnew file mode 100644\nindex 000000000..66b0a8538\n--- /dev/null\n+++ b/CONTRIBUTING.md\n@@ -0,0 +1,235 @@\n+<!---\n+Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to πŸ€— Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+Some of the ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. 
It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide us with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_config.yml`)\n+\n+### Do you want a new feature?\n+\n+A good feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Submitting a pull request (PR)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+πŸ€— Accelerate. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote. The following command\n+ assumes you have your public SSH key uploaded to GitHub. See the following guide for more\n+ [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. 
Create a new branch to hold your development changes, and do this for every new PR you work on.\n+\n+ Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):\n+\n+ ```bash\n+ $ git checkout main\n+ $ git fetch upstream\n+ $ git merge upstream/main\n+ ```\n+\n+ Once your `main` branch is synchronized, create a new branch from it:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:\n+\n+ ```bash\n+ $ pip install -e \".[quality]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+ \n+ > For the following commands leveraging the `make` utility, we recommend using the WSL system when running on\n+ > Windows. More information [here](https://docs.microsoft.com/en-us/windows/wsl/about).\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command applies the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:\n+\n+ ```bash\n+ $ make quality\n+ ```\n+\n+ Once you're happy with your changes, add changed files using `git add` and\n+ make a commit with `git commit` to record your changes locally:\n+\n+ ```bash\n+ $ git add modified_file.py\n+ $ git commit\n+ ```\n+\n+ Please write [good commit messages](https://chris.beams.io/posts/git-commit/).\n+\n+ It is a good idea to sync your copy of the code with the original\n+ repository regularly. This way you can quickly account for changes:\n+\n+ ```bash\n+ $ git fetch upstream\n+ $ git rebase upstream/master\n+ ```\n+\n+ Push the changes to your account using:\n+\n+ ```bash\n+ $ git push -u origin a-descriptive-name-for-my-changes\n+ ```\n+\n+6. Once you are satisfied (**and the checklist below is happy too**), go to the\n+ webpage of your fork on GitHub. Click on 'Pull request' to send your changes\n+ to the project maintainers for review.\n+\n+7. It's ok if maintainers ask you for changes. It happens to core contributors\n+ too! So everyone can see the changes in the Pull request, work in your local\n+ branch and push the changes to your fork. They will automatically appear in\n+ the pull request.\n+\n+\n+### Checklist\n+\n+1. The title of your pull request should be a summary of its contribution;\n+2. 
If your pull request addresses an issue, please mention the issue number in\n+ the pull request description to make sure they are linked (and people\n+ consulting the issue know you are working on it);\n+3. To indicate a work in progress please prefix the title with `[WIP]`, or mark\n+ the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate\n+ it from PRs ready to be merged;\n+4. Make sure existing tests pass;\n+5. Add high-coverage tests. No quality testing = no merge.\n+\n+See an example of a good PR here: https://github.com/huggingface/accelerate/pull/255\n+\n+### Tests\n+\n+An extensive test suite is included to test the library behavior and several examples. Library tests can be found in\n+the [tests folder](https://github.com/huggingface/accelerate/tree/master/tests).\n+\n+We use `pytest` in order to run the tests. From the root of the\n+repository, here's how to run tests with `pytest` for the library:\n+\n+```bash\n+$ python -m pytest -sv ./tests\n+```\n+\n+In fact, that's how `make test` is implemented (sans the `pip install` line)!\n+\n+You can specify a smaller set of tests in order to test only the feature\n+you're working on.\n\\ No newline at end of file\n", "code_comments": [ { "body": "Not sure if that's important given you're watching the repo.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.", "from_author": true }, { "body": "Yeah we can remove that part ;-)", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.", "from_author": false }, { "body": "Missing the emoji here.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?", "from_author": false }, { "body": "```suggestion\r\n* Provide us with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_congig.yml`)\r\n* Provide the command you used to launch your script (for instance `accelerate launch train_script.py`)\r\n* Provide the *full* traceback if an exception is raised.\r\n```", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.", "from_author": false }, { "body": "```suggestion\r\n### Do you want a new feature?\r\n```", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?", "from_author": false }, { "body": "```suggestion\r\nπŸ€— Accelerate. `git` is not the easiest tool to use but it has the greatest\r\n```", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest", "from_author": false }, { "body": "```suggestion\r\n $ pip install -e \".[quality]\"\r\n```\r\nThere is no dev install for now ;-)", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! 
(Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"", "from_author": false }, { "body": "shouldn't this go under `LICENSE` file instead?", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->", "from_author": false }, { "body": "```suggestion\r\nSome of the ways you can contribute to Accelerate:\r\n```", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:", "from_author": false }, { "body": "Some or most of this information could also go to an issue template. The templates also support _form_ format ([example](https://github.com/scikit-learn/scikit-learn/issues/new?assignees=&labels=Bug%2CNeeds+Triage&template=bug_report.yml)) and people reporting a bug are probably not reading this document, but they would see that form or that template.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?", "from_author": false }, { "body": "we found that having a `get_versions()` which returns all relevant versions is helpful. Users would just paste the output of that in the issue.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.", "from_author": false }, { "body": "```suggestion\r\nA good feature request addresses the following points:\r\n```", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:", "from_author": false }, { "body": "```suggestion\r\n## Submitting a pull request (PR)\r\n```\r\n\r\n\"Start contributing\" implies the other ones were not contributions.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)", "from_author": false }, { "body": "this assumes the user has their public ssh key uploaded to github, it deserves a note.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! 
(Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git", "from_author": false }, { "body": "```suggestion\r\n3. Create a new branch to hold your development changes, and do this for every new PR you work on:\r\n```", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? 
:( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:", "from_author": false }, { "body": "either checkout from `upstream/master`, or have them sync those two first, e.g.:\r\n\r\n- Synchronize your ``main`` branch with the ``upstream/main`` branch,\r\n more details on `GitHub Docs <https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork>`_:\r\n\r\n .. prompt:: bash $\r\n\r\n git checkout main\r\n git fetch upstream\r\n git merge upstream/main\r\n", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! 
(Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes", "from_author": false }, { "body": "```suggestion\r\n4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:\r\n```", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. 
So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:", "from_author": false }, { "body": "on a windows machine they'd need a git bash or similar to run this I think.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! 
(Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style", "from_author": false }, { "body": "you could also add them as git hooks, which have helped us with users' contributions.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. 
Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:", "from_author": false }, { "body": "in longer PRs (time-wise), rebase can be very tricky for beginner users, merge usually makes it much easier for them, and if you're squash and merging, the merge commit wouldn't show up in your history.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. 
Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:\n+\n+ ```bash\n+ $ make quality\n+ ```\n+\n+ Once you're happy with your changes, add changed files using `git add` and\n+ make a commit with `git commit` to record your changes locally:\n+\n+ ```bash\n+ $ git add modified_file.py\n+ $ git commit\n+ ```\n+\n+ Please write [good commit messages](https://chris.beams.io/posts/git-commit/).\n+\n+ It is a good idea to sync your copy of the code with the original\n+ repository regularly. This way you can quickly account for changes:\n+\n+ ```bash\n+ $ git fetch upstream\n+ $ git rebase upstream/master", "from_author": false }, { "body": "or create a _draft_ PR?", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. 
Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. 
Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:\n+\n+ ```bash\n+ $ make quality\n+ ```\n+\n+ Once you're happy with your changes, add changed files using `git add` and\n+ make a commit with `git commit` to record your changes locally:\n+\n+ ```bash\n+ $ git add modified_file.py\n+ $ git commit\n+ ```\n+\n+ Please write [good commit messages](https://chris.beams.io/posts/git-commit/).\n+\n+ It is a good idea to sync your copy of the code with the original\n+ repository regularly. This way you can quickly account for changes:\n+\n+ ```bash\n+ $ git fetch upstream\n+ $ git rebase upstream/master\n+ ```\n+\n+ Push the changes to your account using:\n+\n+ ```bash\n+ $ git push -u origin a-descriptive-name-for-my-changes\n+ ```\n+\n+6. Once you are satisfied (**and the checklist below is happy too**), go to the\n+ webpage of your fork on GitHub. Click on 'Pull request' to send your changes\n+ to the project maintainers for review.\n+\n+7. It's ok if maintainers ask you for changes. It happens to core contributors\n+ too! So everyone can see the changes in the Pull request, work in your local\n+ branch and push the changes to your fork. They will automatically appear in\n+ the pull request.\n+\n+\n+### Checklist\n+\n+1. The title of your pull request should be a summary of its contribution;\n+2. If your pull request addresses an issue, please mention the issue number in\n+ the pull request description to make sure they are linked (and people\n+ consulting the issue know you are working on it);\n+3. To indicate a work in progress please prefix the title with `[WIP]`. 
These\n+ are useful to avoid duplicated work, and to differentiate it from PRs ready\n+ to be merged;", "from_author": false }, { "body": "adding a link to a _good_ PR may help them by seeing a good example.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. 
Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:\n+\n+ ```bash\n+ $ make quality\n+ ```\n+\n+ Once you're happy with your changes, add changed files using `git add` and\n+ make a commit with `git commit` to record your changes locally:\n+\n+ ```bash\n+ $ git add modified_file.py\n+ $ git commit\n+ ```\n+\n+ Please write [good commit messages](https://chris.beams.io/posts/git-commit/).\n+\n+ It is a good idea to sync your copy of the code with the original\n+ repository regularly. This way you can quickly account for changes:\n+\n+ ```bash\n+ $ git fetch upstream\n+ $ git rebase upstream/master\n+ ```\n+\n+ Push the changes to your account using:\n+\n+ ```bash\n+ $ git push -u origin a-descriptive-name-for-my-changes\n+ ```\n+\n+6. 
Once you are satisfied (**and the checklist below is happy too**), go to the\n+ webpage of your fork on GitHub. Click on 'Pull request' to send your changes\n+ to the project maintainers for review.\n+\n+7. It's ok if maintainers ask you for changes. It happens to core contributors\n+ too! So everyone can see the changes in the Pull request, work in your local\n+ branch and push the changes to your fork. They will automatically appear in\n+ the pull request.\n+\n+\n+### Checklist", "from_author": false }, { "body": "There is also a `LICENSE` at the root of the repo. We have an additional license at the top of every file, with the appropriate copyrights for the code in that specific file. If some code was taken from somewhere else or from another org, we add a mention to that org in the copyright here.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->", "from_author": true }, { "body": "Sounds good. Ideally this would be done in a follow-up PR.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?", "from_author": true }, { "body": "Nice! 
That's what we use in `transformers`, with `transformers-cli env`. I agree it would be nice to port it to `accelerate`.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.", "from_author": true }, { "body": "I won't have the bandwidth right now to spin a tutorial about adding git hooks for our checks, but I agree that it would definitely have its place here. \r\n\r\nFeel free to push a commit directly to the `contributing-guide` branch with it, otherwise happy to do it in a follow-up PR when I have the bandwidth.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! 
(Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:", "from_author": true }, { "body": "What we've seen is that merging can cause a lot of issues with github not understanding what was done with the branch, and users ending up with all the commits on their branch (which of course came with all file changes accompanying these commits).\r\n\r\nWe then ask the users to close the PR and to open a new one with the same branch which generally solves the issue, but it adds more friction than necessary, so up to now we've mostly asked users to rebase their branches.\r\n\r\nIf you feel strongly, happy to add an additional mention for merge.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! 
(Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:\n+\n+ ```bash\n+ $ make quality\n+ ```\n+\n+ Once you're happy with your changes, add changed files using `git add` and\n+ make a commit with `git commit` to record your changes locally:\n+\n+ ```bash\n+ $ git add modified_file.py\n+ $ git commit\n+ ```\n+\n+ Please write [good commit messages](https://chris.beams.io/posts/git-commit/).\n+\n+ It is a good idea to sync your copy of the code with the original\n+ repository regularly. This way you can quickly account for changes:\n+\n+ ```bash\n+ $ git fetch upstream\n+ $ git rebase upstream/master", "from_author": true }, { "body": "```suggestion\r\nCopyright 2022 The HuggingFace Team. All rights reserved.\r\n```", "diff_hunk": "@@ -0,0 +1,235 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.", "from_author": false }, { "body": "If you mean including the git hooks in the repository itself, I think a lot of us in the team are averse to having them in our own setups so it would be polarising :)", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. 
Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! (Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:", "from_author": true }, { "body": "Yeah I saw. We can follow up on that later.", "diff_hunk": "@@ -0,0 +1,220 @@\n+<!---\n+Copyright 2020 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+There are 4 ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+Please ping @sgugger on your contribution.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the *full* traceback if an exception is raised.\n+\n+### Do you want a new feature (that is not a model)?\n+\n+A world-class feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Start contributing! 
(Pull Requests)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+`accelerate`. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote:\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a virtual environment:\n+\n+ ```bash\n+ $ pip install -e \".[dev]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:", "from_author": false }, { "body": "Just a little thing, it's `git rebase upstream/main`", "diff_hunk": "@@ -0,0 +1,235 @@\n+<!---\n+Copyright 2022 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to πŸ€— Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. 
Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+Some of the ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_congig.yml`)\n+\n+### Do you want a new feature?\n+\n+A good feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Submitting a pull request (PR)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+πŸ€— Accelerate. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. 
Clone your fork to your local disk, and add the base repository as a remote. The following command\n+ assumes you have your public SSH key uploaded to GitHub. See the following guide for more\n+ [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes, and do this for every new PR you work on.\n+\n+ Start by synchronizing your `main` branch with the `upstream/main` branch (ore details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):\n+\n+ ```bash\n+ $ git checkout main\n+ $ git fetch upstream\n+ $ git merge upstream/main\n+ ```\n+\n+ Once your `main` branch is synchronized, create a new branch from it:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:\n+\n+ ```bash\n+ $ pip install -e \".[quality]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+ \n+ > For the following commands leveraging the `make` utility, we recommend using the WSL system when running on\n+ > Windows. More information [here](https://docs.microsoft.com/en-us/windows/wsl/about).\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command apply the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI, however you can also run the same checks with:\n+\n+ ```bash\n+ $ make quality\n+ ```\n+\n+ Once you're happy with your changes, add changed files using `git add` and\n+ make a commit with `git commit` to record your changes locally:\n+\n+ ```bash\n+ $ git add modified_file.py\n+ $ git commit\n+ ```\n+\n+ Please write [good commit messages](https://chris.beams.io/posts/git-commit/).\n+\n+ It is a good idea to sync your copy of the code with the original\n+ repository regularly. This way you can quickly account for changes:\n+\n+ ```bash\n+ $ git fetch upstream\n+ $ git rebase upstream/master", "from_author": false }, { "body": "Good catch! Do you want to make a PR to fix this?", "diff_hunk": "@@ -0,0 +1,235 @@\n+<!---\n+Copyright 2022 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# How to contribute to πŸ€— Accelerate?\n+\n+Everyone is welcome to contribute, and we value everybody's contribution. Code\n+is thus not the only way to help the community. Answering questions, helping\n+others, reaching out and improving the documentations are immensely valuable to\n+the community.\n+\n+It also helps us if you spread the word: reference the library from blog posts\n+on the awesome projects it made possible, shout out on Twitter every time it has\n+helped you, or simply star the repo to say \"thank you\".\n+\n+Whichever way you choose to contribute, please be mindful to respect our\n+[code of conduct](https://github.com/huggingface/accelerate/blob/master/CODE_OF_CONDUCT.md).\n+\n+## You can contribute in so many ways!\n+\n+Some of the ways you can contribute to Accelerate:\n+* Fixing outstanding issues with the existing code;\n+* Contributing to the examples or to the documentation;\n+* Submitting issues related to bugs or desired new features.\n+\n+## Submitting a new issue or feature request\n+\n+Do your best to follow these guidelines when submitting an issue or a feature\n+request. It will make it easier for us to come back to you quickly and with good\n+feedback.\n+\n+### Did you find a bug?\n+\n+The πŸ€— Accelerate library is robust and reliable thanks to the users who notify us of\n+the problems they encounter. So thank you for reporting an issue.\n+\n+First, we would really appreciate it if you could **make sure the bug was not\n+already reported** (use the search bar on Github under Issues).\n+\n+Did not find it? :( So we can act quickly on it, please follow these steps:\n+\n+* Include your **OS type and version**, the versions of **Python** and **PyTorch**.\n+* A short, self-contained, code snippet that allows us to reproduce the bug in\n+ less than 30s;\n+* Provide the with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_congig.yml`)\n+\n+### Do you want a new feature?\n+\n+A good feature request addresses the following points:\n+\n+1. Motivation first:\n+* Is it related to a problem/frustration with the library? If so, please explain\n+ why. Providing a code snippet that demonstrates the problem is best.\n+* Is it related to something you would need for a project? We'd love to hear\n+ about it!\n+* Is it something you worked on and think could benefit the community?\n+ Awesome! Tell us what problem it solved for you.\n+2. Write a *full paragraph* describing the feature;\n+3. Provide a **code snippet** that demonstrates its future use;\n+4. In case this is related to a paper, please attach a link;\n+5. Attach any additional information (drawings, screenshots, etc.) 
you think may help.\n+\n+If your issue is well written we're already 80% of the way there by the time you\n+post it.\n+\n+## Submitting a pull request (PR)\n+\n+Before writing code, we strongly advise you to search through the existing PRs or\n+issues to make sure that nobody is already working on the same thing. If you are\n+unsure, it is always a good idea to open an issue to get some feedback.\n+\n+You will need basic `git` proficiency to be able to contribute to\n+πŸ€— Accelerate. `git` is not the easiest tool to use but it has the greatest\n+manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro\n+Git](https://git-scm.com/book/en/v2) is a very good reference.\n+\n+Follow these steps to start contributing:\n+\n+1. Fork the [repository](https://github.com/huggingface/accelerate) by\n+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code\n+ under your GitHub user account.\n+\n+2. Clone your fork to your local disk, and add the base repository as a remote. The following command\n+ assumes you have your public SSH key uploaded to GitHub. See the following guide for more\n+ [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).\n+\n+ ```bash\n+ $ git clone git@github.com:<your Github handle>/accelerate.git\n+ $ cd accelerate\n+ $ git remote add upstream https://github.com/huggingface/accelerate.git\n+ ```\n+\n+3. Create a new branch to hold your development changes, and do this for every new PR you work on.\n+\n+ Start by synchronizing your `main` branch with the `upstream/main` branch (ore details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):\n+\n+ ```bash\n+ $ git checkout main\n+ $ git fetch upstream\n+ $ git merge upstream/main\n+ ```\n+\n+ Once your `main` branch is synchronized, create a new branch from it:\n+\n+ ```bash\n+ $ git checkout -b a-descriptive-name-for-my-changes\n+ ```\n+\n+ **Do not** work on the `master` branch.\n+\n+4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:\n+\n+ ```bash\n+ $ pip install -e \".[quality]\"\n+ ```\n+\n+ (If accelerate was already installed in the virtual environment, remove\n+ it with `pip uninstall accelerate` before reinstalling it in editable\n+ mode with the `-e` flag.)\n+\n+5. Develop the features on your branch.\n+\n+ As you work on the features, you should make sure that the test suite\n+ passes. You should run the tests impacted by your changes like this (see \n+ below an explanation regarding the environment variable):\n+\n+ ```bash\n+ $ pytest tests/<TEST_TO_RUN>.py\n+ ```\n+ \n+ > For the following commands leveraging the `make` utility, we recommend using the WSL system when running on\n+ > Windows. More information [here](https://docs.microsoft.com/en-us/windows/wsl/about).\n+\n+ You can also run the full suite with the following command.\n+\n+ ```bash\n+ $ make test\n+ ```\n+\n+ `accelerate` relies on `black` and `isort` to format its source code\n+ consistently. 
After you make changes, apply automatic style corrections and code verifications\n+ that can't be automated in one go with:\n+\n+ This target is also optimized to only work with files modified by the PR you're working on.\n+\n+ If you prefer to run the checks one after the other, the following command applies the\n+ style corrections:\n+\n+ ```bash\n+ $ make style\n+ ```\n+\n+ `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality\n+ control runs in CI; however, you can also run the same checks with:\n+\n+ ```bash\n+ $ make quality\n+ ```\n+\n+ Once you're happy with your changes, add changed files using `git add` and\n+ make a commit with `git commit` to record your changes locally:\n+\n+ ```bash\n+ $ git add modified_file.py\n+ $ git commit\n+ ```\n+\n+ Please write [good commit messages](https://chris.beams.io/posts/git-commit/).\n+\n+ It is a good idea to sync your copy of the code with the original\n+ repository regularly. This way you can quickly account for changes:\n+\n+ ```bash\n+ $ git fetch upstream\n+ $ git rebase upstream/main
It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model train, and inference logic.\n * [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.\n+* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centred around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!\n+\n \n ## Installation\n \n", "code_comments": [], "context": [ { "body": "btw, it would be great if you could comment on my \"Accelerator mixin\" - [Engine](https://github.com/Scitator/animus/blob/main/animus/torch/engine.py) in terms of \"a bit more user-friendly Accelerator API\". I found it useful for me to run pipelines within notebooks or python scripts without CLI.", "from_author": true }, { "body": "Ah thanks! I am wrapping up some things in Transformers and will go back to Accelerate pretty soon (haven't forgotten #215 !)\r\n\r\nYou mixins look great! Having one per kind of distributed setup is a bit against the philosophy of Accelerate (where the same exact code runs on any setup) but I like the abstraction.\r\n\r\nTo run things in notebooks, we also have `notebook_launcher`, I don't know if you tried it or not.", "from_author": false }, { "body": "yeah, I found `notebook_launcher` an interesting functional alternative... nevertheless, from my previous experience I knew a few limitations of pure functional way, so... 
a small class-based API was created ;)", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/249", "pr_id": 842502625 }, { "diff": "diff --git a/.gitignore b/.gitignore\nindex 3b89c957d..f41377320 100644\n--- a/.gitignore\n+++ b/.gitignore\n@@ -130,3 +130,6 @@ dmypy.json\n \n # VSCode\n .vscode\n+\n+# IntelliJ\n+.idea\n\\ No newline at end of file\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex f4164613f..bb7c7f5b1 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -73,7 +73,7 @@ def __getitem__(self, idx):\n \n def training_function(config, args):\n # Initialize accelerator\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu)\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n \n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n@@ -187,6 +187,15 @@ def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n parser.add_argument(\"--data_dir\", required=True, help=\"The data folder on disk.\")\n parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n config = {\"lr\": 3e-2, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 64, \"image_size\": 224}\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex a1f310271..2f869bd0f 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -51,7 +51,7 @@\n \n def training_function(config, args):\n # Initialize accelerator\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu)\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu, mixed_precision=args.mixed_precision)\n \n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n@@ -163,6 +163,15 @@ def collate_fn(examples):\n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ type=str,\n+ default=\"no\",\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether to use mixed precision. Choose\"\n+ \"between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.\"\n+ \"and an Nvidia Ampere GPU.\",\n+ )\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 76c3067a3..ed3b2acb7 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -14,6 +14,7 @@\n \n import gc\n import os\n+import warnings\n from contextlib import contextmanager\n from typing import List, Optional, Union\n \n@@ -60,10 +61,11 @@ class Accelerator:\n :obj:`True` the actual batch size used will be the same on any kind of distributed processes, but it must\n be a round multiple of the :obj:`num_processes` you are using. If :obj:`False`, actual batch size used will\n be the one set in your script multiplied by the number of processes.\n- fp16 (:obj:`bool`, `optional`):\n- Whether or not to use mixed precision training. Will default to the value in the environment variable\n- :obj:`USE_FP16`, which will use the default value in the accelerate config of the current system or the\n- flag passed with the :obj:`accelerate.launch` command.\n+ mixed_precision (:obj:`str`, `optional`):\n+ Whether or not to use mixed precision training (fp16 or bfloat16). Choose from 'no','fp16','bf16'. Will\n+ default to the value in the environment variable :obj:`MIXED_PRECISION`, which will use the default value\n+ in the accelerate config of the current system or the flag passed with the :obj:`accelerate.launch`\n+ command. 'fp16' requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher.\n cpu (:obj:`bool`, `optional`):\n Whether or not to force the script to execute on CPU. Will ignore GPU available if set to :obj:`True` and\n force the execution on one process only.\n@@ -101,12 +103,25 @@ def __init__(\n device_placement: bool = True,\n split_batches: bool = False,\n fp16: bool = None,\n+ mixed_precision: str = None,\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+\n+ if mixed_precision is not None:\n+ mixed_precision = mixed_precision.lower()\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ raise ValueError(\n+ f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"\n+ )\n+\n+ if fp16:\n+ warnings.warn('fp16=True is deprecated. 
Use mixed_precision=\"fp16\" instead.', DeprecationWarning)\n+ mixed_precision = \"fp16\"\n+\n if deepspeed_plugin is None: # init from env variables\n deepspeed_plugin = DeepSpeedPlugin() if os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" else None\n else:\n@@ -139,7 +154,11 @@ def __init__(\n \n kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}\n self.state = AcceleratorState(\n- fp16=fp16, cpu=cpu, deepspeed_plugin=deepspeed_plugin, _from_accelerator=True, **kwargs\n+ mixed_precision=mixed_precision,\n+ cpu=cpu,\n+ deepspeed_plugin=deepspeed_plugin,\n+ _from_accelerator=True,\n+ **kwargs,\n )\n \n self.device_placement = device_placement\n@@ -149,8 +168,18 @@ def __init__(\n # Mixed precision attributes\n self.scaler = None\n self.native_amp = False\n- if self.state.use_fp16:\n+ if self.state.mixed_precision == \"fp16\":\n self.native_amp = version.parse(torch.__version__) >= version.parse(\"1.6\")\n+ if version.parse(torch.__version__) < version.parse(\"1.6\"):\n+ raise ValueError(\"fp16 mixed precision requires PyTorch >= 1.6\")\n+\n+ kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n+ self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n+ elif self.state.mixed_precision == \"bf16\":\n+ self.native_amp = version.parse(torch.__version__) >= version.parse(\"1.10\")\n+ if mixed_precision == \"bf16\" and version.parse(torch.__version__) < version.parse(\"1.10\"):\n+ raise ValueError(\"bf16 mixed precision requires PyTorch >= 1.10\")\n+\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n \n@@ -195,11 +224,20 @@ def is_local_main_process(self):\n \n @property\n def use_fp16(self):\n+ return self.mixed_precision != \"no\"\n+\n+ @property\n+ def mixed_precision(self):\n if self.distributed_type == DistributedType.DEEPSPEED:\n- use_fp16 = self.state.deepspeed_plugin.deepspeed_config[\"fp16\"][\"enabled\"]\n+ if self.state.deepspeed_plugin.deepspeed_config[\"fp16\"][\"enabled\"]:\n+ mixed_precision = \"fp16\"\n+ elif self.state.deepspeed_plugin.deepspeed_config[\"bf16\"][\"enabled\"]:\n+ mixed_precision = \"bf16\"\n+ else:\n+ mixed_precision = \"no\"\n else:\n- use_fp16 = self.state.use_fp16\n- return use_fp16\n+ mixed_precision = self.state.mixed_precision\n+ return mixed_precision\n \n @contextmanager\n def local_main_process_first(self):\n@@ -311,7 +349,12 @@ def prepare_model(self, model):\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n- model.forward = torch.cuda.amp.autocast()(model.forward)\n+ if self.mixed_precision == \"fp16\" and version.parse(torch.__version__) >= version.parse(\"1.10\"):\n+ model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)\n+ elif self.mixed_precision == \"bf16\":\n+ model.forward = torch.cuda.amp.autocast(dtype=torch.bfloat16)(model.forward)\n+ else:\n+ model.forward = torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n return model\n \n@@ -577,7 +620,13 @@ def autocast(self):\n different will happen otherwise.\n \"\"\"\n if self.native_amp:\n- autocast_context = torch.cuda.amp.autocast()\n+ if self.mixed_precision == \"fp16\" and version.parse(torch.__version__) >= version.parse(\"1.10\"):\n+ autocast_context = torch.cuda.amp.autocast(dtype=torch.float16)\n+ elif self.mixed_precision == \"bf16\":\n+ 
autocast_context = torch.cuda.amp.autocast(dtype=torch.bfloat16)\n+ else:\n+ autocast_context = torch.cuda.amp.autocast()\n+\n autocast_context.__enter__()\n yield\n autocast_context.__exit__()\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex eaaad20f2..534ea1526 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -103,20 +103,19 @@ def get_cluster_input():\n )\n \n if distributed_type != DistributedType.TPU:\n- fp16 = _ask_field(\n- \"Do you wish to use FP16 (mixed precision)? [yes/NO]: \",\n- _convert_yes_no_to_bool,\n- default=False,\n- error_message=\"Please enter yes or no.\",\n+ mixed_precision = _ask_field(\n+ \"Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: \",\n+ lambda x: str(x).lower(),\n+ default=\"no\",\n )\n else:\n- fp16 = False\n+ mixed_precision = \"no\"\n \n return ClusterConfig(\n compute_environment=ComputeEnvironment.LOCAL_MACHINE,\n distributed_type=distributed_type,\n num_processes=num_processes,\n- fp16=fp16,\n+ mixed_precision=mixed_precision,\n machine_rank=machine_rank,\n num_machines=num_machines,\n main_process_ip=main_process_ip,\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 39c00c280..891580ab5 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -66,7 +66,7 @@ def load_config_from_file(config_file):\n class BaseConfig:\n compute_environment: ComputeEnvironment\n distributed_type: Union[DistributedType, SageMakerDistributedType]\n- fp16: bool\n+ mixed_precision: str\n \n def to_dict(self):\n result = self.__dict__\n@@ -83,6 +83,10 @@ def from_json_file(cls, json_file=None):\n config_dict = json.load(f)\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n+ if \"mixed_precision\" not in config_dict:\n+ config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n+ if \"fp16\" in config_dict: # Convert the config to the new format.\n+ del config_dict[\"fp16\"]\n return cls(**config_dict)\n \n def to_json_file(self, json_file):\n@@ -97,6 +101,12 @@ def from_yaml_file(cls, yaml_file=None):\n config_dict = yaml.safe_load(f)\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n+\n+ if \"mixed_precision\" not in config_dict:\n+ config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n+ if \"fp16\" in config_dict: # Convert the config to the new format.\n+ del config_dict[\"fp16\"]\n+\n return cls(**config_dict)\n \n def to_yaml_file(self, yaml_file):\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex feff0aecf..3b0fc78dd 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -20,7 +20,7 @@\n from accelerate.utils import is_boto3_available\n \n from .config_args import SageMakerConfig\n-from .config_utils import _ask_field, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool\n+from .config_utils import _ask_field, _convert_sagemaker_distributed_mode\n \n \n if is_boto3_available():\n@@ -139,11 +139,11 @@ def get_sagemaker_input():\n lambda x: int(x),\n default=2,\n )\n- fp16 = _ask_field(\n- \"Do you wish to use FP16 (mixed precision)? 
[yes/NO]: \",\n- _convert_yes_no_to_bool,\n- default=False,\n- error_message=\"Please enter yes or no.\",\n+\n+ mixed_precision = _ask_field(\n+ \"Do you wish to use FP16 or BF16 (mixed precision)? [No/FP16/BF16]: \",\n+ lambda x: str(x),\n+ default=\"No\",\n )\n \n return SageMakerConfig(\n@@ -153,6 +153,6 @@ def get_sagemaker_input():\n profile=aws_profile,\n region=aws_region,\n iam_role_name=iam_role_name,\n- fp16=fp16,\n+ mixed_precision=mixed_precision,\n num_machines=num_machines,\n )\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 4f392d6fa..77db4546b 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -19,6 +19,7 @@\n import os\n import subprocess\n import sys\n+import warnings\n from ast import literal_eval\n from pathlib import Path\n from typing import Dict, List\n@@ -53,6 +54,16 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ default=\"no\",\n+ type=str,\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether or not to use mixed precision training. \"\n+ \"Choose between FP16 and BF16 (bfloat16) training. \"\n+ \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n+ )\n+\n parser.add_argument(\n \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\n )\n@@ -134,7 +145,16 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+\n+ mixed_precision = args.mixed_precision.lower()\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\")\n+\n+ if args.fp16:\n+ warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ mixed_precision = \"fp16\"\n+\n+ current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n \n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n@@ -168,7 +188,16 @@ def multi_gpu_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+ mixed_precision = args.mixed_precision.lower()\n+\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\")\n+\n+ if args.fp16:\n+ warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ mixed_precision = \"fp16\"\n+\n+ current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n \n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n@@ -201,7 +230,16 @@ def deepspeed_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+ mixed_precision = args.mixed_precision.lower()\n+\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\")\n+\n+ if args.fp16:\n+ warnings.warn('--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ mixed_precision = \"fp16\"\n+\n+ current_env[\"MIXED_PRECISION\"] = str(mixed_precision)\n current_env[\"USE_DEEPSPEED\"] = \"true\"\n current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n@@ -309,8 +347,17 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n print(\"Converting Arguments to Hyperparameters\")\n hyperparameters = _convert_nargs_to_dict(args.training_script_args)\n \n- environment = {\"USE_FP16\": args.fp16} # Environment variables to be set for use during training job\n+ mixed_precision = args.mixed_precision.lower()\n+\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ raise ValueError(f\"Unknown mixed_precision mode: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\")\n \n+ if args.fp16:\n+ warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ mixed_precision = \"fp16\"\n+\n+ # Environment variables to be set for use during training job\n+ environment = {\"MIXED_PRECISION\": str(mixed_precision)}\n # configure distribution set up\n distribution = None # TODO: not yet implemented\n \n@@ -360,13 +407,16 @@ def launch_command(args):\n \n # Those args are handled separately\n if (\n- name not in [\"compute_environment\", \"fp16\", \"distributed_type\"]\n+ name not in [\"compute_environment\", \"fp16\", \"mixed_precision\", \"distributed_type\"]\n and getattr(args, name, None) is None\n ):\n setattr(args, name, attr)\n \n- if not args.fp16:\n- args.fp16 = defaults.fp16\n+ if not args.mixed_precision:\n+ if args.fp16:\n+ args.mixed_precision = \"fp16\"\n+ else:\n+ args.mixed_precision = defaults.mixed_precision\n else:\n if args.num_processes is None:\n args.num_processes = 1\ndiff --git a/src/accelerate/notebook_launcher.py b/src/accelerate/notebook_launcher.py\nindex b848e13d4..90b4f826a 100644\n--- a/src/accelerate/notebook_launcher.py\n+++ b/src/accelerate/notebook_launcher.py\n@@ -14,6 +14,7 @@\n \n import os\n import sys\n+import warnings\n \n import torch\n \n@@ -23,7 +24,7 @@\n from .utils import PrepareForLaunch\n \n \n-def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use_port=\"29500\"):\n+def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\n \"\"\"\n Launches a training function, using several processes if it's possible in the current environment (TPU with\n multiple cores for instance).\n@@ -37,8 +38,8 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n num_processes (:obj:`int`, `optional`):\n The number of processes to use for training. 
Will default to 8 in Colab/Kaggle if a TPU is available, to\n the number of GPUs available otherwise.\n- use_fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):\n- If :obj:`True`, will use mixed precision training on multi-GPU.\n+ mixed_precision (:obj:`str`, `optional`, defaults to :obj:`no`):\n+ If :obj:`fp16` or :obj:`bf16`, will use mixed precision training on multi-GPU.\n use_port (:obj:`str`, `optional`, defaults to :obj:`\"29500\"`):\n The port to use to communicate between processes when launching a multi-GPU training.\n \"\"\"\n@@ -110,7 +111,18 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n os.environ[\"WORLD_SIZE\"] = str(num_processes)\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n os.environ[\"MASTER_PORT\"] = str(use_port)\n- os.environ[\"USE_FP16\"] = str(use_fp16)\n+\n+ mixed_precision = mixed_precision.lower()\n+ if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\n+ raise ValueError(\n+ f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"\n+ )\n+\n+ if use_fp16:\n+ warnings.warn('use_fp16=True is deprecated. Use mixed_precision=\"fp16\" instead.', DeprecationWarning)\n+ mixed_precision = \"fp16\"\n+\n+ os.environ[\"MIXED_PRECISION\"] = str(mixed_precision)\n \n launcher = PrepareForLaunch(function, distributed_type=\"MULTI_GPU\")\n try:\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex e4d536e14..6693a34ca 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -66,6 +66,11 @@ def parse_flag_from_env(key, default=False):\n return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int...\n \n \n+def parse_choice_from_env(key, default=\"no\"):\n+ value = os.environ.get(key, str(default))\n+ return value\n+\n+\n class DistributedType(str, Enum):\n \"\"\"\n Represents a type of distributed environment.\n@@ -133,18 +138,25 @@ class AcceleratorState:\n - **num_processes** (:obj:`int`) -- The number of processes currently launched in parallel.\n - **process_index** (:obj:`int`) -- The index of the current process.\n - **local_process_index** (:obj:`int`) -- The index of the current process on the current server.\n- - **use_fp16** (:obj:`bool`) -- Whether or not the current script will use mixed precision.\n+ - **mixed_precision** (:obj:`str`) -- Whether or not the current script will use mixed precision. 
If you are\n+ using mixed precision, define if you want to use FP16 or BF16 (bfloat16) as the floating point.\n \"\"\"\n \n _shared_state = {}\n \n def __init__(\n- self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None, _from_accelerator: bool = False, **kwargs\n+ self,\n+ mixed_precision: str = None,\n+ cpu: bool = False,\n+ deepspeed_plugin=None,\n+ _from_accelerator: bool = False,\n+ **kwargs,\n ):\n self.__dict__ = self._shared_state\n if not getattr(self, \"initialized\", False):\n self.backend = None\n self.deepspeed_plugin = None\n+ mixed_precision = mixed_precision.lower() if mixed_precision else None\n if not _from_accelerator:\n raise ValueError(\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\n@@ -156,7 +168,7 @@ def __init__(\n self.process_index = xm.get_ordinal()\n self.local_process_index = xm.get_local_ordinal()\n self.device = xm.xla_device()\n- self.use_fp16 = False\n+ self.mixed_precision = \"no\"\n elif os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\n assert (\n is_deepspeed_available()\n@@ -170,9 +182,14 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.use_fp16 = False # deepspeed handles fp16 using deepspeed_config\n- fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n- deepspeed_plugin.deepspeed_config.update({\"fp16\": {\"enabled\": fp16}})\n+ self.mixed_precision = \"no\" # deepspeed handles mixed_precision using deepspeed_config\n+ mixed_precision = (\n+ parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n+ )\n+ if mixed_precision == \"fp16\":\n+ deepspeed_plugin.deepspeed_config.update({\"fp16\": {\"enabled\": True}})\n+ elif mixed_precision == \"bf16\":\n+ deepspeed_plugin.deepspeed_config.update({\"bfloat16\": {\"enabled\": True}})\n self.deepspeed_plugin = deepspeed_plugin\n elif int(os.environ.get(\"LOCAL_RANK\", -1)) != -1 and not cpu:\n self.distributed_type = DistributedType.MULTI_GPU\n@@ -184,7 +201,10 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ self.mixed_precision = (\n+ parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n+ )\n+\n elif get_int_from_env([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"], 1) > 1:\n self.distributed_type = DistributedType.MULTI_CPU\n if is_ccl_available() and get_int_from_env([\"CCL_WORKER_COUNT\"], 0) > 0:\n@@ -221,24 +241,32 @@ def __init__(\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = local_rank\n self.device = torch.device(\"cpu\")\n- self.use_fp16 = False\n+ self.mixed_precision = \"no\"\n else:\n self.distributed_type = DistributedType.NO\n self.num_processes = 1\n self.process_index = self.local_process_index = 0\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\n- self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ self.mixed_precision = (\n+ parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision is None else mixed_precision\n+ )\n self.initialized = True\n \n def __repr__(self):\n- 
use_fp16 = self.deepspeed_plugin.fp16 if self.distributed_type == DistributedType.DEEPSPEED else self.use_fp16\n+ mixed_precision = self.mixed_precision\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ if self.deepspeed_plugin.fp16:\n+ mixed_precision = \"fp16\"\n+ if self.deepspeed_plugin.bfloat16:\n+ mixed_precision = \"bf16\"\n+\n repr = (\n f\"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\\n\"\n f\"Num processes: {self.num_processes}\\n\"\n f\"Process index: {self.process_index}\\n\"\n f\"Local process index: {self.local_process_index}\\n\"\n f\"Device: {self.device}\\n\"\n- f\"Use FP16 precision: {use_fp16}\\n\"\n+ f\"Mixed precision type: {mixed_precision}\\n\"\n )\n if self.distributed_type == DistributedType.DEEPSPEED:\n repr += f\"ds_config: {self.deepspeed_plugin.ds_config}\\n\"\ndiff --git a/src/accelerate/test_utils/test_script.py b/src/accelerate/test_utils/test_script.py\nindex 41efcf61c..ecd8e4f02 100644\n--- a/src/accelerate/test_utils/test_script.py\n+++ b/src/accelerate/test_utils/test_script.py\n@@ -245,6 +245,29 @@ def training_check():\n accelerator.print(\"Training yielded the same results on one CPU or distributes setup with batch split.\")\n \n # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n+ print(\"FP16 training check.\")\n+ accelerator = Accelerator(mixed_precision=\"fp16\")\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n+ model = RegressionModel()\n+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n+\n+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n+ set_seed(42)\n+ generator.manual_seed(42)\n+ for _ in range(3):\n+ for batch in train_dl:\n+ model.zero_grad()\n+ output = model(batch[\"x\"])\n+ loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+\n+ model = accelerator.unwrap_model(model).cpu()\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+\n+ # TEST that previous fp16 flag still works\n+ print(\"Legacy FP16 training check.\")\n accelerator = Accelerator(fp16=True)\n train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n model = RegressionModel()\n@@ -265,6 +288,28 @@ def training_check():\n assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n+ # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16\n+ print(\"BF16 training check.\")\n+ accelerator = Accelerator(mixed_precision=\"bf16\")\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n+ model = RegressionModel()\n+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n+\n+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n+ set_seed(42)\n+ generator.manual_seed(42)\n+ for _ in range(3):\n+ for batch in train_dl:\n+ model.zero_grad()\n+ output = model(batch[\"x\"])\n+ loss = torch.nn.functional.mse_loss(output, batch[\"y\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+\n+ model = accelerator.unwrap_model(model).cpu()\n+ assert 
torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n+\n \n def main():\n accelerator = Accelerator()\ndiff --git a/src/accelerate/test_utils/training.py b/src/accelerate/test_utils/training.py\nindex 3cdea4517..2ea86fbb1 100644\n--- a/src/accelerate/test_utils/training.py\n+++ b/src/accelerate/test_utils/training.py\n@@ -36,6 +36,10 @@ def __init__(self, a=0, b=0, double_output=False):\n super().__init__()\n self.a = torch.nn.Parameter(torch.tensor(a).float())\n self.b = torch.nn.Parameter(torch.tensor(b).float())\n+ self.first_batch = True\n \n def forward(self, x=None):\n+ if self.first_batch:\n+ print(f\"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}\")\n+ self.first_batch = False\n return x * self.a + self.b\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 991997282..3af792f1f 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -23,6 +23,8 @@\n import numpy as np\n import torch\n \n+from packaging import version\n+\n from .state import AcceleratorState, DistributedType, is_deepspeed_available, is_tpu_available\n \n \n@@ -240,23 +242,26 @@ def _initialize_tensor(tensor_info):\n \n def convert_to_fp32(tensor):\n \"\"\"\n- Recursively converts the elements nested list/tuple/dictionary of tensors in FP16 precision to FP32.\n+ Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.\n \n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n- The data to convert from FP16 to FP32.\n+ The data to convert from FP16/BF16 to FP32.\n \n Returns:\n- The same data structure as :obj:`tensor` with all tensors that were in FP16 precision converted to FP32.\n+ The same data structure as :obj:`tensor` with all tensors that were in FP16/BF16 precision converted to FP32.\n \"\"\"\n \n def _convert_to_fp32(tensor):\n return tensor.float()\n \n- def _is_fp16_tensor(tensor):\n- return hasattr(tensor, \"dtype\") and tensor.dtype == torch.float16\n+ def _is_fp16_bf16_tensor(tensor):\n+ return hasattr(tensor, \"dtype\") and (\n+ tensor.dtype == torch.float16\n+ or (version.parse(torch.__version__) >= version.parse(\"1.10\") and tensor.dtype == torch.bfloat16)\n+ )\n \n- return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_tensor)\n+ return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)\n \n \n def convert_outputs_to_fp32(model_forward):\n", "code_comments": [ { "body": "Default here should be `None`, and then get the value of `MIXED_PRECISION` (as is done for `fp16`).", "diff_hunk": "@@ -101,12 +106,24 @@ def __init__(\n device_placement: bool = True,\n split_batches: bool = False,\n fp16: bool = None,\n+ mixed_precision: str = \"no\",", "from_author": false }, { "body": "```suggestion\r\n warnings.warn('--fp16 flag is deprecated. 
Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\r\n```\r\nWe're not inside a script, so I would adapt to use arguments: `fp16=True` is deprecated, use `mixed_precision=\"fp16\"` instead.", "diff_hunk": "@@ -101,12 +106,24 @@ def __init__(\n device_placement: bool = True,\n split_batches: bool = False,\n fp16: bool = None,\n+ mixed_precision: str = \"no\",\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+ mixed_precision = mixed_precision.lower()\n+ assert mixed_precision in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ], f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"\n+\n+ if fp16:\n+ warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)", "from_author": false }, { "body": "Rephrase as a test and raise a ValueError:\r\n```\r\nif mixed_precision not in [...]:\r\n raise ValueError(...)\r\n```", "diff_hunk": "@@ -101,12 +106,24 @@ def __init__(\n device_placement: bool = True,\n split_batches: bool = False,\n fp16: bool = None,\n+ mixed_precision: str = \"no\",\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+ mixed_precision = mixed_precision.lower()\n+ assert mixed_precision in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ], f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"", "from_author": false }, { "body": "```suggestion\r\n return self.mixed_precision != \"no\"\r\n```\r\nSeems easier this way.", "diff_hunk": "@@ -195,11 +220,23 @@ def is_local_main_process(self):\n \n @property\n def use_fp16(self):\n+ if self.mixed_precision != \"no\":\n+ return True\n+ else:\n+ return False", "from_author": false }, { "body": "We have to be a bit more careful here and add a version test, as the argument `dtype` was only introduced in PyTorch 1.10. 
So this first test should include\r\n```\r\nand version.parse(torch.__version__) >= version.parse(\"1.10\")\r\n```\r\notherwise we will fall in the else branch at the end.", "diff_hunk": "@@ -311,7 +348,12 @@ def prepare_model(self, model):\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n- model.forward = torch.cuda.amp.autocast()(model.forward)\n+ if self.mixed_precision == \"fp16\":\n+ model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -577,7 +619,13 @@ def autocast(self):\n different will happen otherwise.\n \"\"\"\n if self.native_amp:\n- autocast_context = torch.cuda.amp.autocast()\n+ if self.mixed_precision == \"fp16\":", "from_author": false }, { "body": "It should also be added in the `from_json_file` method above for backward compatibility, and I think we should remove the fp16 key as well, to fully convert the config to the new format.", "diff_hunk": "@@ -97,6 +98,12 @@ def from_yaml_file(cls, yaml_file=None):\n config_dict = yaml.safe_load(f)\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n+ if \"mixed_precision\" not in config_dict:", "from_author": false }, { "body": "As before, use a test and raise a ValueError here.", "diff_hunk": "@@ -134,7 +146,19 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+\n+ mixed_precision = args.mixed_precision.lower()\n+ assert mixed_precision in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ], f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -168,7 +192,18 @@ def multi_gpu_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+ mixed_precision = args.mixed_precision.lower()\n+ assert mixed_precision in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ], f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"", "from_author": false }, { "body": "Same here.", "diff_hunk": "@@ -201,7 +236,18 @@ def deepspeed_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+ mixed_precision = args.mixed_precision.lower()\n+ assert mixed_precision in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ], f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"", "from_author": false }, { "body": "And same here.", "diff_hunk": "@@ -309,8 +355,20 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n print(\"Converting Arguments to Hyperparameters\")\n hyperparameters = _convert_nargs_to_dict(args.training_script_args)\n \n- environment = {\"USE_FP16\": args.fp16} # Environment variables to be set for use during training job\n+ mixed_precision = args.mixed_precision.lower()\n+ assert mixed_precision in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ], f\"Unknown mixed_precision: {mixed_precision}. 
Choose between 'no', 'fp16' and 'bf16'.\"", "from_author": false }, { "body": "Nit, put the comment before and have the dict on one line:\r\n```suggestion\r\n # Environment variables to be set for use during training job\r\n environment = {\"MIXED_PRECISION\": str(mixed_precision)}\r\n```", "diff_hunk": "@@ -309,8 +355,20 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n print(\"Converting Arguments to Hyperparameters\")\n hyperparameters = _convert_nargs_to_dict(args.training_script_args)\n \n- environment = {\"USE_FP16\": args.fp16} # Environment variables to be set for use during training job\n+ mixed_precision = args.mixed_precision.lower()\n+ assert mixed_precision in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ], f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"\n+\n+ if args.fp16:\n+ warnings.warn('--fp16 flag is deprecated. Use \"--mixed_precision fp16\" instead.', DeprecationWarning)\n+ mixed_precision = \"fp16\"\n \n+ environment = {\n+ \"MIXED_PRECISION\": str(mixed_precision)\n+ } # Environment variables to be set for use during training job", "from_author": false }, { "body": "As before, raise a value error here.", "diff_hunk": "@@ -110,7 +113,19 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n os.environ[\"WORLD_SIZE\"] = str(num_processes)\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n os.environ[\"MASTER_PORT\"] = str(use_port)\n- os.environ[\"USE_FP16\"] = str(use_fp16)\n+\n+ mixed_precision = mixed_precision.lower()\n+ assert mixed_precision in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ], f\"Unknown mixed_precision: {mixed_precision}. Choose between 'no', 'fp16' and 'bf16'.\"", "from_author": false }, { "body": "Should be `None` here", "diff_hunk": "@@ -133,18 +138,25 @@ class AcceleratorState:\n - **num_processes** (:obj:`int`) -- The number of processes currently launched in parallel.\n - **process_index** (:obj:`int`) -- The index of the current process.\n - **local_process_index** (:obj:`int`) -- The index of the current process on the current server.\n- - **use_fp16** (:obj:`bool`) -- Whether or not the current script will use mixed precision.\n+ - **mixed_precision** (:obj:`str`) -- Whether or not the current script will use mixed precision. If\n+ you are using mixed precision, define if you want to use FP16 or BF16 (bfloat16) as the floating point.\n \"\"\"\n \n _shared_state = {}\n \n def __init__(\n- self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None, _from_accelerator: bool = False, **kwargs\n+ self,\n+ mixed_precision: str = \"no\",", "from_author": false }, { "body": "Test for None before lowering there.", "diff_hunk": "@@ -133,18 +138,25 @@ class AcceleratorState:\n - **num_processes** (:obj:`int`) -- The number of processes currently launched in parallel.\n - **process_index** (:obj:`int`) -- The index of the current process.\n - **local_process_index** (:obj:`int`) -- The index of the current process on the current server.\n- - **use_fp16** (:obj:`bool`) -- Whether or not the current script will use mixed precision.\n+ - **mixed_precision** (:obj:`str`) -- Whether or not the current script will use mixed precision. 
If\n+ you are using mixed precision, define if you want to use FP16 or BF16 (bfloat16) as the floating point.\n \"\"\"\n \n _shared_state = {}\n \n def __init__(\n- self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None, _from_accelerator: bool = False, **kwargs\n+ self,\n+ mixed_precision: str = \"no\",\n+ cpu: bool = False,\n+ deepspeed_plugin=None,\n+ _from_accelerator: bool = False,\n+ **kwargs,\n ):\n self.__dict__ = self._shared_state\n if not getattr(self, \"initialized\", False):\n self.backend = None\n self.deepspeed_plugin = None\n+ mixed_precision = mixed_precision.lower()", "from_author": false }, { "body": "Get the env variable when `mixed_precision` is None", "diff_hunk": "@@ -170,9 +182,14 @@ def __init__(\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n- self.use_fp16 = False # deepspeed handles fp16 using deepspeed_config\n- fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n- deepspeed_plugin.deepspeed_config.update({\"fp16\": {\"enabled\": fp16}})\n+ self.mixed_precision = \"no\" # deepspeed handles mixed_precision using deepspeed_config\n+ mixed_precision = (\n+ parse_choice_from_env(\"MIXED_PRECISION\", \"no\") if mixed_precision == \"no\" else mixed_precision", "from_author": false }, { "body": "This would also need a version check, as I'm not sure the `torch.bfloat16` was there in PyTorch 1.6.", "diff_hunk": "@@ -240,23 +240,23 @@ def _initialize_tensor(tensor_info):\n \n def convert_to_fp32(tensor):\n \"\"\"\n- Recursively converts the elements nested list/tuple/dictionary of tensors in FP16 precision to FP32.\n+ Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.\n \n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n- The data to convert from FP16 to FP32.\n+ The data to convert from FP16/BF16 to FP32.\n \n Returns:\n- The same data structure as :obj:`tensor` with all tensors that were in FP16 precision converted to FP32.\n+ The same data structure as :obj:`tensor` with all tensors that were in FP16/BF16 precision converted to FP32.\n \"\"\"\n \n def _convert_to_fp32(tensor):\n return tensor.float()\n \n- def _is_fp16_tensor(tensor):\n- return hasattr(tensor, \"dtype\") and tensor.dtype == torch.float16\n+ def _is_fp16_bf16_tensor(tensor):\n+ return hasattr(tensor, \"dtype\") and (tensor.dtype == torch.float16 or tensor.dtype == torch.bfloat16)", "from_author": false }, { "body": "Version test added ", "diff_hunk": "@@ -311,7 +348,12 @@ def prepare_model(self, model):\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n- model.forward = torch.cuda.amp.autocast()(model.forward)\n+ if self.mixed_precision == \"fp16\":\n+ model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward)", "from_author": true }, { "body": "I forgot about the \"from_json_file\" method, i have updated it too and I added a line to delete the fp16 key from old config files", "diff_hunk": "@@ -97,6 +98,12 @@ def from_yaml_file(cls, yaml_file=None):\n config_dict = yaml.safe_load(f)\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n+ if \"mixed_precision\" not in config_dict:", "from_author": true }, { "body": "```suggestion\r\n if mixed_precision is not 
None:\r\n```\r\nWe usually prefer explicit tests instead of relying on Python's bool conversion.", "diff_hunk": "@@ -101,12 +106,29 @@ def __init__(\n device_placement: bool = True,\n split_batches: bool = False,\n fp16: bool = None,\n+ mixed_precision: str = None,\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+\n+ if mixed_precision:", "from_author": false }, { "body": "```suggestion\r\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\r\n```\r\nShould fit in one line now.", "diff_hunk": "@@ -101,12 +106,29 @@ def __init__(\n device_placement: bool = True,\n split_batches: bool = False,\n fp16: bool = None,\n+ mixed_precision: str = None,\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+\n+ if mixed_precision:\n+ mixed_precision = mixed_precision.lower()\n+ if mixed_precision not in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ]:", "from_author": false }, { "body": "I realize we are silently not training in mixed precision when:\r\n- `mixed_precision=\"fp16\"` and torch version is < 1.6\r\n- `mixed_precision=\"bf16\"` and torch version is < 1.10\r\n\r\nI think we should raise an error message in both cases, or at least a warning.", "diff_hunk": "@@ -149,10 +175,14 @@ def __init__(\n # Mixed precision attributes\n self.scaler = None\n self.native_amp = False\n- if self.state.use_fp16:\n+ if self.state.mixed_precision == \"fp16\":\n self.native_amp = version.parse(torch.__version__) >= version.parse(\"1.6\")\n kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n+ elif self.state.mixed_precision == \"bf16\":\n+ self.native_amp = version.parse(torch.__version__) >= version.parse(\"1.10\")\n+ kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n+ self.scaler = torch.cuda.amp.GradScaler(**kwargs)", "from_author": false }, { "body": "Nice!", "diff_hunk": "@@ -97,6 +98,12 @@ def from_yaml_file(cls, yaml_file=None):\n config_dict = yaml.safe_load(f)\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n+ if \"mixed_precision\" not in config_dict:", "from_author": false }, { "body": "No need for this new line I think.\r\n```suggestion\r\n```", "diff_hunk": "@@ -53,9 +54,20 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n+ parser.add_argument(\n+ \"--mixed_precision\",\n+ default=\"no\",\n+ type=str,\n+ choices=[\"no\", \"fp16\", \"bf16\"],\n+ help=\"Whether or not to use mixed precision training. \"\n+ \"Choose between FP16 and BF16 (bfloat16) training. 
\"\n+ \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n+ )\n+\n parser.add_argument(\n \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\n )\n+", "from_author": false }, { "body": "```suggestion\r\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\r\n```", "diff_hunk": "@@ -134,7 +146,20 @@ def simple_launcher(args):\n \n current_env = os.environ.copy()\n current_env[\"USE_CPU\"] = str(args.cpu)\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+\n+ mixed_precision = args.mixed_precision.lower()\n+ if mixed_precision not in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ]:", "from_author": false }, { "body": "```suggestion\r\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\r\n```", "diff_hunk": "@@ -168,7 +193,20 @@ def multi_gpu_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+ mixed_precision = args.mixed_precision.lower()\n+\n+ if mixed_precision not in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ]:", "from_author": false }, { "body": "```suggestion\r\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\r\n```", "diff_hunk": "@@ -201,7 +239,20 @@ def deepspeed_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n- current_env[\"USE_FP16\"] = str(args.fp16)\n+ mixed_precision = args.mixed_precision.lower()\n+\n+ if mixed_precision not in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ]:", "from_author": false }, { "body": "```suggestion\r\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\r\n```", "diff_hunk": "@@ -309,8 +360,21 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n print(\"Converting Arguments to Hyperparameters\")\n hyperparameters = _convert_nargs_to_dict(args.training_script_args)\n \n- environment = {\"USE_FP16\": args.fp16} # Environment variables to be set for use during training job\n+ mixed_precision = args.mixed_precision.lower()\n+\n+ if mixed_precision not in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ]:", "from_author": false }, { "body": "You can remove this from the documentation (we try to avoid documenting deprecated args so users don't start using them).", "diff_hunk": "@@ -39,6 +40,8 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n the number of GPUs available otherwise.\n use_fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):\n If :obj:`True`, will use mixed precision training on multi-GPU.", "from_author": false }, { "body": "```suggestion\r\n if mixed_precision not in [\"no\", \"fp16\", \"bf16\"]:\r\n```", "diff_hunk": "@@ -110,7 +113,22 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n os.environ[\"WORLD_SIZE\"] = str(num_processes)\n os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n os.environ[\"MASTER_PORT\"] = str(use_port)\n- os.environ[\"USE_FP16\"] = str(use_fp16)\n+\n+ mixed_precision = mixed_precision.lower()\n+ if mixed_precision not in [\n+ \"no\",\n+ \"fp16\",\n+ \"bf16\",\n+ ]:", "from_author": false } ], "context": [ { "body": "I have updated the PR with the fixes you mention ", "from_author": true }, { "body": "Let me know if you need any help with the last standing comments. When you're finished, make sure to run `make style` on your branch so that the quality check passes :-)", "from_author": false }, { "body": "I think they are solved now. 
Maybe we should add a warning in 'cluster.py' to warn if the user chooses the \"fp16\" or \"bf16\" option but doesn't have the correct version of Pytorch installed?", "from_author": true }, { "body": "This [comment](https://github.com/huggingface/accelerate/pull/247/files#r800669214) and this [one](https://github.com/huggingface/accelerate/pull/247/files#r800671521) don't seem to have been addressed. Could you take care of them?\r\nQuality check is also still failing.\r\n\r\nWe can also add warnings when the options are chosen, yes!", "from_author": false }, { "body": "Sorry the previous commit to solve the problem of silently not training in mixed precision was incorrect. I have a question about implementation. If the version is correct we set self.native_amp to True. \r\n```Python\r\nself.native_amp = version.parse(torch.__version__) >= version.parse(\"1.6\")\r\n```\r\nIf the version is incorrect we disable mixed precision by setting that variable to False. Is it used for anything else? If we are going to raise an error, should we remove it? \r\nShould we raise a error always if the pytorch version is not correct? I'm not sure I understand this part of the code 100% and I don't know if I've implemented right the errors if the pytorch version is not compatible with the chosen mixed precision. \r\n\r\n", "from_author": true }, { "body": "We should at least raise a warning, to tell the user that we are not delivering what they asked for.", "from_author": false }, { "body": "What I mean is that what I have done is adding the following code (same for bf16 but it checks pytorch 1.10). This raises an error if you try to run \"fp16\" with Pytorch < 1.6. However, the previous line sets self.native_amp=True with the oposite condition. \r\nWe will never run the training if native_amp = False because we raise an error. Is this the correct behavior? If it is, should we keep the native_amp variable?\r\n\r\n```Python\r\nif self.state.mixed_precision == \"fp16\":\r\n self.native_amp = version.parse(torch.__version__) >= version.parse(\"1.6\")\r\n if version.parse(torch.__version__) < version.parse(\"1.6\"):\r\n raise ValueError(\"fp16 mixed precision requires PyTorch >= 1.6\")\r\n```", "from_author": true }, { "body": "The way you did it is great! 
Thanks again for your awesome contribution :-)", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/247", "pr_id": 840384554 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 76c3067a3..d3e4fa603 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -302,10 +302,7 @@ def prepare_model(self, model):\n if self.distributed_type == DistributedType.MULTI_GPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(\n- model,\n- device_ids=[self.local_process_index],\n- output_device=self.local_process_index,\n- **kwargs,\n+ model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n )\n elif self.distributed_type == DistributedType.MULTI_CPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n@@ -352,8 +349,12 @@ def _prepare_deepspeed(self, *args):\n is_adamw = isinstance(optimizer, torch.optim.AdamW)\n if (is_adam or is_adamw) and deepspeed_plugin.offload_optimizer_device == \"cpu\":\n defaults = optimizer.defaults\n+ params = []\n+ for group in optimizer.param_groups:\n+ params.extend(group[\"params\"])\n+\n optimizer = deepspeed.ops.adam.DeepSpeedCPUAdam(\n- model.parameters(),\n+ params,\n lr=defaults[\"lr\"],\n bias_correction=True,\n betas=defaults[\"betas\"],\n", "code_comments": [], "context": [ { "body": "Could you just run `make style` on your branch to fix the quality issue? Thanks!", "from_author": false }, { "body": "is this okay? it looks like there are a few other lines in there that also got style fixed", "from_author": true }, { "body": "Looks good, thanks again!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/246", "pr_id": 839822455 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 443c55622..6eba4b295 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -118,11 +118,11 @@ def honor_type(obj, generator):\n \"\"\"\n Cast a generator to the same type as obj (list, tuple or namedtuple)\n \"\"\"\n- # There is no direct check whether an object if of type namedtuple sadly, this is a workaround.\n- if isinstance(obj, tuple) and hasattr(obj, \"_fields\"):\n- # Can instantiate a namedtuple from a generator directly, contrary to a tuple/list.\n+ try:\n+ return type(obj)(generator)\n+ except TypeError:\n+ # Some objects may not be able to instantiate from a generator directly\n return type(obj)(*list(generator))\n- return type(obj)(generator)\n \n \n def is_torch_tensor(tensor):\n", "code_comments": [], "context": [ { "body": "Sorry I was on my Chinese new year leave and just get back.\r\nYeah I do agree this should raise exact error with enough information for debugging.", "from_author": true }, { "body": "Could you just run `male style` on your branch so we can merge this?", "from_author": false }, { "body": "hi, I just noticed the existence of this pr......\r\nI've fixed the linting issue related to this pr, but there seems to be more on other code", "from_author": true }, { "body": "Seems like the checks are all passing now, thanks again!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/241", "pr_id": 834108210 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 3af792f1f..36eaea592 100644\n--- a/src/accelerate/utils.py\n+++ 
b/src/accelerate/utils.py\n@@ -18,7 +18,7 @@\n from collections.abc import Mapping\n from dataclasses import dataclass, field\n from enum import Enum\n-from typing import List, Optional, Union\n+from typing import Any, List, Optional, Union\n \n import numpy as np\n import torch\n@@ -350,6 +350,39 @@ def gather(tensor):\n return tensor\n \n \n+def _gpu_gather_object(object: Any):\n+ def _gpu_gather_object_one(object: Any):\n+ output_objects = [None for _ in range(AcceleratorState().num_processes)]\n+ torch.distributed.all_gather_object(output_objects, object)\n+ return output_objects\n+\n+ return recursively_apply(_gpu_gather_object_one, object)\n+\n+\n+_cpu_gather_object = _gpu_gather_object\n+\n+\n+def gather_object(object: Any):\n+ \"\"\"\n+ Recursively gather object in a nested list/tuple/dictionary of objects from all devices.\n+\n+ Args:\n+ object (nested list/tuple/dictionary of picklable object):\n+ The data to gather.\n+\n+ Returns:\n+ The same data structure as :obj:`object` with all the objects sent to every device.\n+ \"\"\"\n+ if AcceleratorState().distributed_type == DistributedType.TPU:\n+ raise NotImplementedError(\"gather objects in TPU is not supported\")\n+ elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n+ return _gpu_gather_object(object)\n+ elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n+ return _cpu_gather_object(object)\n+ else:\n+ return object\n+\n+\n def _gpu_broadcast(data, src=0):\n def _gpu_broadcast_one(tensor, src=0):\n torch.distributed.broadcast(tensor, src=src)\n", "code_comments": [ { "body": "```suggestion\r\n object (nested list/tuple/dictionary of any object):\r\n```", "diff_hunk": "@@ -345,6 +345,40 @@ def gather(tensor):\n return tensor\n \n \n+\n+def _gpu_gather_object(object: Any):\n+ def _gpu_gather_object_one(object: Any):\n+ output_objects = [None for _ in range(torch.distributed.get_world_size())]\n+ torch.distributed.all_gather_object(output_objects, object)\n+ return [i for j in output_objects for i in j]\n+\n+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)\n+\n+\n+_cpu_gather_object = _gpu_gather_object\n+\n+\n+def gather_object(object: Any):\n+ \"\"\"\n+ Recursively gather object in a nested list/tuple/dictionary of objects from all devices.\n+\n+ Args:\n+ object (nested list/tuple/dictionary of :obj:`Any`):", "from_author": false }, { "body": "Here we should use the `state.num_processes` instead.", "diff_hunk": "@@ -345,6 +345,40 @@ def gather(tensor):\n return tensor\n \n \n+\n+def _gpu_gather_object(object: Any):\n+ def _gpu_gather_object_one(object: Any):\n+ output_objects = [None for _ in range(torch.distributed.get_world_size())]", "from_author": false }, { "body": "I don't understand this line. 
This presupposes that every element in `output_objects` is iterable, but `object` is supposed to be a primitive object at this point (tensor or other if we remove `error_on_type=True`).", "diff_hunk": "@@ -345,6 +345,40 @@ def gather(tensor):\n return tensor\n \n \n+\n+def _gpu_gather_object(object: Any):\n+ def _gpu_gather_object_one(object: Any):\n+ output_objects = [None for _ in range(torch.distributed.get_world_size())]\n+ torch.distributed.all_gather_object(output_objects, object)\n+ return [i for j in output_objects for i in j]", "from_author": false }, { "body": "`torch.all_gather_object` works for all picklable objects, so we can skip the `error_on_other_type=True` I believe.", "diff_hunk": "@@ -345,6 +345,40 @@ def gather(tensor):\n return tensor\n \n \n+\n+def _gpu_gather_object(object: Any):\n+ def _gpu_gather_object_one(object: Any):\n+ output_objects = [None for _ in range(torch.distributed.get_world_size())]\n+ torch.distributed.all_gather_object(output_objects, object)\n+ return [i for j in output_objects for i in j]\n+\n+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)", "from_author": false }, { "body": "```suggestion\r\n The same data structure as :obj:`object` with all the objects sent to every device.\r\n```\r\nI think we need to explain where the list arrives (since each tensor is replaced by a list of tensors normally).", "diff_hunk": "@@ -345,6 +345,40 @@ def gather(tensor):\n return tensor\n \n \n+\n+def _gpu_gather_object(object: Any):\n+ def _gpu_gather_object_one(object: Any):\n+ output_objects = [None for _ in range(torch.distributed.get_world_size())]\n+ torch.distributed.all_gather_object(output_objects, object)\n+ return [i for j in output_objects for i in j]\n+\n+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)\n+\n+\n+_cpu_gather_object = _gpu_gather_object\n+\n+\n+def gather_object(object: Any):\n+ \"\"\"\n+ Recursively gather object in a nested list/tuple/dictionary of objects from all devices.\n+\n+ Args:\n+ object (nested list/tuple/dictionary of :obj:`Any`):\n+ The data to gather.\n+\n+ Returns:\n+ The same data structure as :obj:`Any` with all objects sent to the proper device.", "from_author": false }, { "body": "```suggestion\r\n output_objects = [None for _ in range(AcceleratorState().num_processes)]\r\n```\r\n`state` is not defined.", "diff_hunk": "@@ -350,6 +350,39 @@ def gather(tensor):\n return tensor\n \n \n+def _gpu_gather_object(object: Any):\n+ def _gpu_gather_object_one(object: Any):\n+ output_objects = [None for _ in range(state.num_processes)]", "from_author": false }, { "body": "```suggestion\r\n return recursively_apply(_gpu_gather_object_one, object)\r\n```", "diff_hunk": "@@ -350,6 +350,39 @@ def gather(tensor):\n return tensor\n \n \n+def _gpu_gather_object(object: Any):\n+ def _gpu_gather_object_one(object: Any):\n+ output_objects = [None for _ in range(state.num_processes)]\n+ torch.distributed.all_gather_object(output_objects, object)\n+ return output_objects\n+\n+ return recursively_apply(_gpu_gather_one, tensor)", "from_author": false } ], "context": [ { "body": "> I don't understand this line. 
This presupposes that every element in output_objects is iterable, but object is supposed to be a primitive object at this point (tensor or other if we remove error_on_type=True).\r\n\r\nShould we separate `gather_object` into two functions: `gather_object_list` performs `[i for j in output_objects for i in j]` so it works much like the original `gather`, while `gather_object` does not perform concatenation so it has better support for arbitrary objects.\r\n\r\nOr maybe we could add an argument to control its behavior, like\r\n\r\n```python\r\ndef _gpu_gather_object(object: Any, concat: bool = True):\r\n def _gpu_gather_object_one(object: Any):\r\n output_objects = [None for _ in range(torch.distributed.get_world_size())]\r\n torch.distributed.all_gather_object(output_objects, object)\r\n if concat:\r\n return [i for j in output_objects for i in j]\r\n else:\r\n return output_objects \r\n```", "from_author": true }, { "body": "It can't really concatenate if the tensors are not of the same size, so `gather_object` should not concatenate IMO.", "from_author": false }, { "body": "> It can't really concatenate if the tensors are not of the same size, so `gather_object` should not concatenate IMO.\r\n\r\nYes, we can leave it to the user to concat manually. ", "from_author": true }, { "body": "I have resolved all conversations, could you please review it again?", "from_author": true }, { "body": "> Thanks for addressing all comments, this looks good to merge! Could you just run `make style` on your branch to fix the quality failure?\r\n\r\nSorry for this late reply.... I believe I've resolved the quality issue.", "from_author": true }, { "body": "> Here is how to solve the last quality issues (variables not defined).\r\n\r\nThank you for your kind suggestions! I have resolved these.\r\n(Sorry, I'm currently experiencing some computer issues and `make style` is not giving me full outputs. ", "from_author": true }, { "body": "Hmmmm, is it possible for us to determine the type of the input data and automatically call the corresponding gather function? I just noticed [torchvision](https://github.com/pytorch/vision/blob/main/references/detection/utils.py#L70-L83) uses `gather_object` all the time", "from_author": true }, { "body": "That's too magical for Accelerate's philosophy, users should pick which function they want to use.\r\n\r\nAll green now so we can merge your PR :-) Thanks again!", "from_author": false }, { "body": "> That's too magical for Accelerate's philosophy, users should pick which function they want to use.\n\nHaha, that's the reason why I love this repo so much. Users should be trained programmers and should know what they're doing/supposed to do. 
\n\n> All green now so we can merge your PR :-) Thanks again!\n\nThank you for all the review and comments~", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/238", "pr_id": 832196804 }, { "diff": "diff --git a/docs/source/installation.md b/docs/source/installation.md\nindex 6dac579e9..ca2f88286 100644\n--- a/docs/source/installation.md\n+++ b/docs/source/installation.md\n@@ -73,7 +73,7 @@ If you want to constantly use the bleeding edge `master` version of the source c\n \n ``` bash\n git clone https://github.com/huggingface/accelerate.git\n-cd transformers\n+cd accelerate\n pip install -e .\n ```\n \n", "code_comments": [], "context": [ { "body": "Sorry I forgot to merge for some reason...", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/234", "pr_id": 825368983 }, { "diff": "diff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex aad2b8b65..a1f310271 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -125,7 +125,7 @@ def collate_fn(examples):\n lr_scheduler = get_linear_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=100,\n- num_training_steps=len(train_dataloader) * num_epochs,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n )\n \n # Now we train the model\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex f94b2cd99..76c3067a3 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -22,7 +22,7 @@\n from packaging import version\n \n from .data_loader import prepare_data_loader\n-from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, KwargsHandler\n+from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler\n from .optimizer import AcceleratedOptimizer\n from .state import AcceleratorState, DistributedType, is_deepspeed_available\n from .utils import (\n@@ -114,15 +114,10 @@ def __init__(\n deepspeed_plugin, DeepSpeedPlugin\n ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n \n- self.state = AcceleratorState(fp16=fp16, cpu=cpu, deepspeed_plugin=deepspeed_plugin, _from_accelerator=True)\n-\n- self.device_placement = device_placement\n- self.split_batches = split_batches\n- self.dispatch_batches = dispatch_batches\n-\n # Kwargs handlers\n self.ddp_handler = None\n self.scaler_handler = None\n+ self.init_handler = None\n if kwargs_handlers is not None:\n for handler in kwargs_handlers:\n assert isinstance(handler, KwargsHandler), f\"Unsupported kwargs handler passed: {handler}.\"\n@@ -136,6 +131,20 @@ def __init__(\n raise ValueError(\"You can only pass one `GradScalerKwargs` in `kwargs_handler`.\")\n else:\n self.scaler_handler = handler\n+ elif isinstance(handler, InitProcessGroupKwargs):\n+ if self.init_handler is not None:\n+ raise ValueError(\"You can only pass one `InitProcessGroupKwargs` in `kwargs_handler`.\")\n+ else:\n+ self.init_handler = handler\n+\n+ kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}\n+ self.state = AcceleratorState(\n+ fp16=fp16, cpu=cpu, deepspeed_plugin=deepspeed_plugin, _from_accelerator=True, **kwargs\n+ )\n+\n+ self.device_placement = device_placement\n+ self.split_batches = split_batches\n+ self.dispatch_batches = dispatch_batches\n \n # Mixed precision attributes\n self.scaler = None\ndiff --git a/src/accelerate/kwargs_handlers.py b/src/accelerate/kwargs_handlers.py\nindex 7f52d6639..51760efdc 
100644\n--- a/src/accelerate/kwargs_handlers.py\n+++ b/src/accelerate/kwargs_handlers.py\n@@ -14,6 +14,8 @@\n \n import copy\n from dataclasses import dataclass\n+from datetime import timedelta\n+from typing import Optional\n \n \n class KwargsHandler:\n@@ -71,3 +73,16 @@ class GradScalerKwargs(KwargsHandler):\n backoff_factor: float = 0.5\n growth_interval: int = 2000\n enabled: bool = True\n+\n+\n+@dataclass\n+class InitProcessGroupKwargs(KwargsHandler):\n+ \"\"\"\n+ Use this object in your :class:`~accelerate.Accelerator` to customize the initialization of the distributed\n+ processes. Please refer to the documentation of this `method\n+ <https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group>`__ for more information on\n+ each argument.\n+ \"\"\"\n+\n+ init_method: Optional[str] = None\n+ timeout: timedelta = timedelta(seconds=1800)\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 5b349fc38..e4d536e14 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -138,7 +138,9 @@ class AcceleratorState:\n \n _shared_state = {}\n \n- def __init__(self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None, _from_accelerator: bool = False):\n+ def __init__(\n+ self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None, _from_accelerator: bool = False, **kwargs\n+ ):\n self.__dict__ = self._shared_state\n if not getattr(self, \"initialized\", False):\n self.backend = None\n@@ -161,7 +163,7 @@ def __init__(self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None,\n ), \"DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source\"\n self.distributed_type = DistributedType.DEEPSPEED\n if not torch.distributed.is_initialized():\n- torch.distributed.init_process_group(backend=\"nccl\")\n+ torch.distributed.init_process_group(backend=\"nccl\", **kwargs)\n self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n@@ -175,7 +177,7 @@ def __init__(self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None,\n elif int(os.environ.get(\"LOCAL_RANK\", -1)) != -1 and not cpu:\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n- torch.distributed.init_process_group(backend=\"nccl\")\n+ torch.distributed.init_process_group(backend=\"nccl\", **kwargs)\n self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n@@ -213,7 +215,7 @@ def __init__(self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None,\n \"please try exporting rank 0's hostname as MASTER_ADDR\"\n )\n if not torch.distributed.is_initialized():\n- torch.distributed.init_process_group(backend, rank=rank, world_size=size)\n+ torch.distributed.init_process_group(backend, rank=rank, world_size=size, **kwargs)\n self.backend = backend\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/228", "pr_id": 817943268 }, { "diff": "diff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex aad2b8b65..a1f310271 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -125,7 +125,7 @@ def collate_fn(examples):\n lr_scheduler = get_linear_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=100,\n- 
num_training_steps=len(train_dataloader) * num_epochs,\n+ num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,\n )\n \n # Now we train the model\n", "code_comments": [], "context": [ { "body": ":+1: ", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/227", "pr_id": 817932480 }, { "diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 4269242b4..52217ba6a 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -53,19 +53,27 @@ class AcceleratedOptimizer(torch.optim.Optimizer):\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n- self.state = AcceleratorState()\n+ self.accelerator_state = AcceleratorState()\n self.device_placement = device_placement\n self._is_overflow = False\n \n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n- if self.state.distributed_type == DistributedType.TPU:\n- xm.send_cpu_data_to_device(state_dict, self.state.device)\n+ if self.accelerator_state.distributed_type == DistributedType.TPU:\n+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)\n else:\n- state_dict = move_to_device(state_dict, self.state.device)\n+ state_dict = move_to_device(state_dict, self.accelerator_state.device)\n self.optimizer.load_state_dict(state_dict)\n \n+ @property\n+ def state(self):\n+ return self.optimizer.state\n+\n+ @state.setter\n+ def state(self, state):\n+ self.optimizer.state = state\n+\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n@@ -86,8 +94,8 @@ def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n \n def load_state_dict(self, state_dict):\n- if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n- xm.send_cpu_data_to_device(state_dict, self.state.device)\n+ if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:\n+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)\n self.optimizer.load_state_dict(state_dict)\n \n def state_dict(self):\n@@ -113,7 +121,7 @@ def zero_grad(self, set_to_none=None):\n self.optimizer.zero_grad()\n \n def step(self, closure=None):\n- if self.state.distributed_type == DistributedType.TPU:\n+ if self.accelerator_state.distributed_type == DistributedType.TPU:\n optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/224", "pr_id": 813542932 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 73f8ecc0b..b28f24fc6 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -335,6 +335,7 @@ def __iter__(self):\n # We only iterate through the DataLoader on process 0.\n main_iterator = super().__iter__()\n stop_iteration = False\n+ first_batch = None\n while not stop_iteration:\n # On process 0, we gather the batch to dispatch.\n if state.process_index == 0:\n@@ -382,7 +383,18 @@ def __iter__(self):\n # Broadcast the batch before splitting it.\n batch = broadcast(batch, from_process=0)\n \n- batch_size = find_batch_size(batch) // state.num_processes\n+ if not self.drop_last and first_batch is None:\n+ # We keep at least num processes elements of the first batch to 
be able to complete the last batch\n+ first_batch = slice_tensors(batch, slice(0, state.num_processes))\n+\n+ observed_batch_size = find_batch_size(batch)\n+ batch_size = observed_batch_size // state.num_processes\n+\n+ if not self.drop_last and stop_iteration and observed_batch_size % state.num_processes != 0:\n+ # If the last batch is not complete, let's add the first batch to it.\n+ batch = concatenate([batch, first_batch], dim=0)\n+ batch_size += 1\n+\n data_slice = slice(state.process_index * batch_size, (state.process_index + 1) * batch_size)\n \n if state.distributed_type == DistributedType.TPU:\n@@ -534,6 +546,7 @@ def prepare_data_loader(\n \n # Need to provide batch_size as batch_sampler is None for Iterable dataset\n if new_batch_sampler is None:\n+ kwargs[\"drop_last\"] = dataloader.drop_last\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\n \n if dispatch_batches:\n", "code_comments": [ { "body": "Will this run twice on the first batch in case `drop_last` is `False`?", "diff_hunk": "@@ -382,7 +383,18 @@ def __iter__(self):\n # Broadcast the batch before splitting it.\n batch = broadcast(batch, from_process=0)\n \n- batch_size = find_batch_size(batch) // state.num_processes\n+ if not self.drop_last and first_batch is None:\n+ # We keep at least num processes elements of the first batch to be able to complete the last batch\n+ first_batch = slice_tensors(batch, slice(0, state.num_processes))\n+\n+ observed_batch_size = find_batch_size(batch)\n+ batch_size = observed_batch_size // state.num_processes\n+\n+ if not self.drop_last and stop_iteration and observed_batch_size % state.num_processes != 0:\n+ # If the last batch is not complete, let's add the first batch to it.\n+ batch = concatenate([batch, first_batch], dim=0)", "from_author": false }, { "body": "Yes, like all the dataloading utils, the batches need to be the same length on all processes, which means we have to add some data from the beginning to completet the last batch when `drop_last=False`.", "diff_hunk": "@@ -382,7 +383,18 @@ def __iter__(self):\n # Broadcast the batch before splitting it.\n batch = broadcast(batch, from_process=0)\n \n- batch_size = find_batch_size(batch) // state.num_processes\n+ if not self.drop_last and first_batch is None:\n+ # We keep at least num processes elements of the first batch to be able to complete the last batch\n+ first_batch = slice_tensors(batch, slice(0, state.num_processes))\n+\n+ observed_batch_size = find_batch_size(batch)\n+ batch_size = observed_batch_size // state.num_processes\n+\n+ if not self.drop_last and stop_iteration and observed_batch_size % state.num_processes != 0:\n+ # If the last batch is not complete, let's add the first batch to it.\n+ batch = concatenate([batch, first_batch], dim=0)", "from_author": true } ], "context": [ { "body": "The `DispatchDataLoader` in general has no test, so we would need to add those before adding a test for the fix. 
I can do that, but probably not before next week so think we should merge now to at least fix the issue.", "from_author": true }, { "body": "Sounds good!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/212", "pr_id": 801556923 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex ac79d3e0a..f94b2cd99 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -150,7 +150,8 @@ def __init__(\n self._models = []\n \n # RNG Types\n- if rng_types is None:\n+ self.rng_types = rng_types\n+ if self.rng_types is None:\n self.rng_types = [\"torch\"] if version.parse(torch.__version__) <= version.parse(\"1.5.1\") else [\"generator\"]\n \n @property\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/206", "pr_id": 797586087 }, { "diff": "diff --git a/README.md b/README.md\nindex f944eea2d..cbc23ed10 100644\n--- a/README.md\n+++ b/README.md\n@@ -206,6 +206,13 @@ You should use πŸ€— Accelerate when you want to easily run your training scripts\n \n You shouldn't use πŸ€— Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that, πŸ€— Accelerate is not one of them.\n \n+## Frameworks using πŸ€— Accelerate\n+\n+If you like the simplicity of πŸ€— Accelerate but would prefer a higher-level abstraction around your training loop, some frameworks that are built on top of πŸ€— Accelerate are listed below:\n+\n+* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centred around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!\n+* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. 
Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.\n+\n ## Installation\n \n This repository is tested on Python 3.6+ and PyTorch 1.4.0+\ndiff --git a/src/accelerate/notebook_launcher.py b/src/accelerate/notebook_launcher.py\nindex fef2f0c08..b848e13d4 100644\n--- a/src/accelerate/notebook_launcher.py\n+++ b/src/accelerate/notebook_launcher.py\n@@ -114,7 +114,7 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n \n launcher = PrepareForLaunch(function, distributed_type=\"MULTI_GPU\")\n try:\n- print(f\"Launching a training on {num_processes} GPUs.\")\n+ print(f\"Launching training on {num_processes} GPUs.\")\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n finally:\n # Clean up the environment variables set.\n", "code_comments": [ { "body": "No need for those two new lines here :-)", "diff_hunk": "@@ -220,6 +227,8 @@ Then, you will need to install PyTorch: refer to the [official installation page\n pip install accelerate\n ```\n \n+\n+", "from_author": false }, { "body": "Removed.", "diff_hunk": "@@ -220,6 +227,8 @@ Then, you will need to install PyTorch: refer to the [official installation page\n pip install accelerate\n ```\n \n+\n+", "from_author": true } ], "context": [ { "body": "> Thanks for your PR! I was more thinking of having a separate section named \"Frameworks using Accelerate\" where you could put the same content. There is also kornia using Accelerate internally in their trainer I would add there :-)\r\n\r\nSure thing, I can create an additional section, migrate the content, and will also look into and add Kornia. ", "from_author": true }, { "body": "@sgugger I have updated the README, hopefully that is closer to what you had in mind. 
Would you also like to me add a corresponding section to the docs?", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/204", "pr_id": 794387842 }, { "diff": "diff --git a/docs/source/index.rst b/docs/source/index.rst\nindex 3b1281c84..384db658a 100644\n--- a/docs/source/index.rst\n+++ b/docs/source/index.rst\n@@ -69,7 +69,7 @@ Changing it to work with accelerate is really easy and only adds a few lines of\n loss = my_loss_function(outputs, targets)\n # Just a small change for the backward instruction\n - loss.backward()\n- + accelerate.backward(loss)\n+ + accelerator.backward(loss)\n my_optimizer.step()\n \n and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n@@ -97,7 +97,7 @@ especially for TPU training):\n loss = my_loss_function(outputs, targets)\n # Just a small change for the backward instruction\n - loss.backward()\n- + accelerate.backward(loss)\n+ + accelerator.backward(loss)\n my_optimizer.step()\n \n \n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/199", "pr_id": 779317374 }, { "diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 37ca78833..4269242b4 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -12,6 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import inspect\n+\n import torch\n \n from packaging import version\n@@ -100,9 +102,15 @@ def zero_grad(self, set_to_none=None):\n )\n self.optimizer.zero_grad()\n else:\n- if set_to_none is None:\n- set_to_none = False\n- self.optimizer.zero_grad(set_to_none=set_to_none)\n+ accept_arg = \"set_to_none\" in inspect.signature(self.optimizer.zero_grad).parameters\n+ if accept_arg:\n+ if set_to_none is None:\n+ set_to_none = False\n+ self.optimizer.zero_grad(set_to_none=set_to_none)\n+ else:\n+ if set_to_none is not None:\n+ raise ValueError(\"`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.\")\n+ self.optimizer.zero_grad()\n \n def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/189", "pr_id": 762410224 }, { "diff": "diff --git a/README.md b/README.md\nindex 281325bfe..f944eea2d 100644\n--- a/README.md\n+++ b/README.md\n@@ -155,6 +155,8 @@ For instance, here is how you would run the GLUE example on the MRPC task (from\n accelerate launch examples/nlp_example.py\n ```\n \n+This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torch.distributed.launch my_script.py` at your convenance.\n+\n ## Launching multi-CPU run using MPI\n \n πŸ€— Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). 
You can use Intel MPI or MVAPICH as well.\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/187", "pr_id": 761529590 }, { "diff": "diff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 220c6ddcc..aad2b8b65 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -162,8 +162,8 @@ def collate_fn(examples):\n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", type=bool, default=False, help=\"If passed, will use FP16 training.\")\n- parser.add_argument(\"--cpu\", type=bool, default=False, help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/183", "pr_id": 754539911 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex b8428733b..991997282 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -15,7 +15,7 @@\n import importlib\n import os\n import random\n-from collections import UserDict\n+from collections.abc import Mapping\n from dataclasses import dataclass, field\n from enum import Enum\n from typing import List, Optional, Union\n@@ -163,7 +163,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n for o in data\n ),\n )\n- elif isinstance(data, UserDict):\n+ elif isinstance(data, Mapping):\n return type(data)(\n {\n k: recursively_apply(\n@@ -172,15 +172,6 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n for k, v in data.items()\n }\n )\n- elif isinstance(data, dict):\n- return type(data)(\n- **{\n- k: recursively_apply(\n- func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n- )\n- for k, v in data.items()\n- }\n- )\n elif test_type(data):\n return func(data, *args, **kwargs)\n elif error_on_other_type:\n@@ -310,7 +301,7 @@ def extract_model_from_parallel(model):\n def _tpu_gather(tensor, name=\"gather tensor\"):\n if isinstance(tensor, (list, tuple)):\n return honor_type(tensor, (_tpu_gather(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor)))\n- elif isinstance(tensor, (dict, UserDict)):\n+ elif isinstance(tensor, Mapping):\n return type(tensor)({k: _tpu_gather(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n@@ -365,7 +356,7 @@ def _gpu_broadcast_one(tensor, src=0):\n def _tpu_broadcast(tensor, src=0, name=\"broadcast tensor\"):\n if isinstance(tensor, (list, tuple)):\n return honor_type(tensor, (_tpu_broadcast(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor)))\n- elif isinstance(tensor, (dict, UserDict)):\n+ elif isinstance(tensor, Mapping):\n return type(tensor)({k: _tpu_broadcast(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n return xm.mesh_reduce(name, tensor, lambda x: x[src])\n \n@@ -448,7 +439,7 @@ def find_batch_size(data):\n \"\"\"\n if isinstance(data, (tuple, list)):\n return find_batch_size(data[0])\n- elif 
isinstance(data, (dict, UserDict)):\n+ elif isinstance(data, Mapping):\n for k in data.keys():\n return find_batch_size(data[k])\n elif not isinstance(data, torch.Tensor):\n@@ -471,10 +462,8 @@ def concatenate(data, dim=0):\n \"\"\"\n if isinstance(data[0], (tuple, list)):\n return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))\n- elif isinstance(data[0], UserDict):\n+ elif isinstance(data[0], Mapping):\n return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n- elif isinstance(data[0], dict):\n- return type(data[0])(**{k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n elif not isinstance(data[0], torch.Tensor):\n raise TypeError(f\"Can only concatenate tensors but got {type(data[0])}\")\n return torch.cat(data, dim=dim)\n", "code_comments": [ { "body": "You would need to remove the `**` for this to work with UserDict I believe.\r\n```suggestion\r\n {\r\n```", "diff_hunk": "@@ -163,16 +163,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n for o in data\n ),\n )\n- elif isinstance(data, UserDict):\n- return type(data)(\n- {\n- k: recursively_apply(\n- func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n- )\n- for k, v in data.items()\n- }\n- )\n- elif isinstance(data, dict):\n+ elif isinstance(data, Mapping):\n return type(data)(\n **{", "from_author": false }, { "body": "```suggestion\r\n return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\r\n```\r\nSame here", "diff_hunk": "@@ -471,9 +462,7 @@ def concatenate(data, dim=0):\n \"\"\"\n if isinstance(data[0], (tuple, list)):\n return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))\n- elif isinstance(data[0], UserDict):\n- return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n- elif isinstance(data[0], dict):\n+ elif isinstance(data[0], Mapping):\n return type(data[0])(**{k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})", "from_author": false }, { "body": "`UserDict` also works with the `**` syntax. 
You can test this with the following code or by looking at the source [here](https://github.com/python/cpython/blob/2c56c97f015a7ea81719615ddcf3c745fba5b4f3/Lib/collections/__init__.py#L964-L984) (warning you that the code is a bit messy in Python 3.6-3.8 compared to Python 3.9 πŸ˜„):\r\n```python\r\nfrom collections import UserDict\r\nassert UserDict(**{\"a\": 1, \"b\": 2}) == UserDict({\"a\": 1, \"b\": 2})\r\n```", "diff_hunk": "@@ -163,16 +163,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n for o in data\n ),\n )\n- elif isinstance(data, UserDict):\n- return type(data)(\n- {\n- k: recursively_apply(\n- func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n- )\n- for k, v in data.items()\n- }\n- )\n- elif isinstance(data, dict):\n+ elif isinstance(data, Mapping):\n return type(data)(\n **{", "from_author": true }, { "body": "But not the `BatchEncoding` subclass we are doing all of this for :-)\r\n\r\nYou can try and see that:\r\n```\r\nfrom transformers import BatchEncoding\r\nassert BatchEncoding(**{\"a\": 1, \"b\": 2}) == BatchEncoding({\"a\": 1, \"b\": 2})\r\n```\r\nfails", "diff_hunk": "@@ -163,16 +163,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n for o in data\n ),\n )\n- elif isinstance(data, UserDict):\n- return type(data)(\n- {\n- k: recursively_apply(\n- func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n- )\n- for k, v in data.items()\n- }\n- )\n- elif isinstance(data, dict):\n+ elif isinstance(data, Mapping):\n return type(data)(\n **{", "from_author": false }, { "body": "Oh, I see. Thanks.", "diff_hunk": "@@ -163,16 +163,7 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n for o in data\n ),\n )\n- elif isinstance(data, UserDict):\n- return type(data)(\n- {\n- k: recursively_apply(\n- func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n- )\n- for k, v in data.items()\n- }\n- )\n- elif isinstance(data, dict):\n+ elif isinstance(data, Mapping):\n return type(data)(\n **{", "from_author": true }, { "body": "This one still needs to be changed.", "diff_hunk": "@@ -471,9 +462,7 @@ def concatenate(data, dim=0):\n \"\"\"\n if isinstance(data[0], (tuple, list)):\n return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))\n- elif isinstance(data[0], UserDict):\n- return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n- elif isinstance(data[0], dict):\n+ elif isinstance(data[0], Mapping):\n return type(data[0])(**{k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})", "from_author": false }, { "body": "But besides replacing `(dict, UserDict)` with `Mapping` in the `isinstance` checks, the code right now is identical to the code you pushed in the initial PR. 
However, I see an additional simplification we can use if we abandon the `**` syntax (maybe that is what you are aiming for).", "diff_hunk": "@@ -471,9 +462,7 @@ def concatenate(data, dim=0):\n \"\"\"\n if isinstance(data[0], (tuple, list)):\n return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))\n- elif isinstance(data[0], UserDict):\n- return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n- elif isinstance(data[0], dict):\n+ elif isinstance(data[0], Mapping):\n return type(data[0])(**{k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})", "from_author": true } ], "context": [ { "body": "cc @sgugger ", "from_author": true }, { "body": "@sgugger I've reverted the changes in these two functions, so now the code should work with `BatchEncoding`. However, even though this would add some complexity, maybe a cleaner approach would be to use a specific check for `BatchEncoding`. But for now, I think the current solution is good enough.", "from_author": true }, { "body": "Thanks again for your contribution!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/180", "pr_id": 747888847 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 28a38539a..b8428733b 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -15,6 +15,7 @@\n import importlib\n import os\n import random\n+from collections import UserDict\n from dataclasses import dataclass, field\n from enum import Enum\n from typing import List, Optional, Union\n@@ -162,6 +163,15 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth\n for o in data\n ),\n )\n+ elif isinstance(data, UserDict):\n+ return type(data)(\n+ {\n+ k: recursively_apply(\n+ func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n+ )\n+ for k, v in data.items()\n+ }\n+ )\n elif isinstance(data, dict):\n return type(data)(\n **{\n@@ -300,7 +310,7 @@ def extract_model_from_parallel(model):\n def _tpu_gather(tensor, name=\"gather tensor\"):\n if isinstance(tensor, (list, tuple)):\n return honor_type(tensor, (_tpu_gather(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor)))\n- elif isinstance(tensor, dict):\n+ elif isinstance(tensor, (dict, UserDict)):\n return type(tensor)({k: _tpu_gather(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n@@ -355,7 +365,7 @@ def _gpu_broadcast_one(tensor, src=0):\n def _tpu_broadcast(tensor, src=0, name=\"broadcast tensor\"):\n if isinstance(tensor, (list, tuple)):\n return honor_type(tensor, (_tpu_broadcast(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor)))\n- elif isinstance(tensor, dict):\n+ elif isinstance(tensor, (dict, UserDict)):\n return type(tensor)({k: _tpu_broadcast(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n return xm.mesh_reduce(name, tensor, lambda x: x[src])\n \n@@ -438,7 +448,7 @@ def find_batch_size(data):\n \"\"\"\n if isinstance(data, (tuple, list)):\n return find_batch_size(data[0])\n- elif isinstance(data, dict):\n+ elif isinstance(data, (dict, UserDict)):\n for k in data.keys():\n return find_batch_size(data[k])\n elif not isinstance(data, torch.Tensor):\n@@ -461,6 +471,8 @@ def concatenate(data, dim=0):\n \"\"\"\n if isinstance(data[0], (tuple, list)):\n return honor_type(data[0], 
(concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))\n+ elif isinstance(data[0], UserDict):\n+ return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n elif isinstance(data[0], dict):\n return type(data[0])(**{k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n elif not isinstance(data[0], torch.Tensor):\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nindex 9b16aba7b..e515df8fe 100644\n--- a/tests/test_utils.py\n+++ b/tests/test_utils.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n import unittest\n-from collections import namedtuple\n+from collections import UserDict, namedtuple\n \n import torch\n \n@@ -54,3 +54,11 @@ def test_send_to_device(self):\n self.assertTrue(torch.equal(result3.b[0].cpu(), tensor))\n self.assertTrue(torch.equal(result3.b[1].cpu(), tensor))\n self.assertEqual(result3.c, 1)\n+\n+ result4 = send_to_device(UserDict({\"a\": tensor, \"b\": [tensor, tensor], \"c\": 1}), device)\n+ self.assertIsInstance(result4, UserDict)\n+ self.assertTrue(torch.equal(result4[\"a\"].cpu(), tensor))\n+ self.assertIsInstance(result4[\"b\"], list)\n+ self.assertTrue(torch.equal(result4[\"b\"][0].cpu(), tensor))\n+ self.assertTrue(torch.equal(result4[\"b\"][1].cpu(), tensor))\n+ self.assertEqual(result4[\"c\"], 1)\n", "code_comments": [], "context": [ { "body": "@sgugger @LysandreJik Instead of adding `UserDict` to the `isinstance` class tuple, you can use `collections.abc.Mapping` to cover both the `dict` and the `UserDict` types (e.g. PyTorch DataLoader uses such check in its default `collate_fn` to stack `dict`-like objects). \r\n", "from_author": false }, { "body": "Oh thanks for the tip! I will clean the code to use that when I have a bit of time (unless you want to suggest a PR!)", "from_author": true }, { "body": "I'm busy currently, so probably won't find time to fix this myself.", "from_author": false }, { "body": "I did find time to fix this, so I've opened a PR.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/179", "pr_id": 746855253 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex e25b80c49..28a38539a 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -201,7 +201,7 @@ def _send_to_device(t, device):\n def _has_to_method(t):\n return hasattr(t, \"to\")\n \n- return recursively_apply(_send_to_device, tensor, device, test_type=_has_to_method, error_on_other_type=True)\n+ return recursively_apply(_send_to_device, tensor, device, test_type=_has_to_method)\n \n \n def get_data_structure(data):\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nindex ca617634d..9b16aba7b 100644\n--- a/tests/test_utils.py\n+++ b/tests/test_utils.py\n@@ -20,7 +20,7 @@\n from accelerate.utils import send_to_device\n \n \n-TestNamedTuple = namedtuple(\"TestNamedTuple\", \"a b\")\n+TestNamedTuple = namedtuple(\"TestNamedTuple\", \"a b c\")\n \n \n class UtilsTester(unittest.TestCase):\n@@ -31,23 +31,26 @@ def test_send_to_device(self):\n result1 = send_to_device(tensor, device)\n self.assertTrue(torch.equal(result1.cpu(), tensor))\n \n- result2 = send_to_device((tensor, [tensor, tensor]), device)\n+ result2 = send_to_device((tensor, [tensor, tensor], 1), device)\n self.assertIsInstance(result2, tuple)\n self.assertTrue(torch.equal(result2[0].cpu(), tensor))\n self.assertIsInstance(result2[1], list)\n self.assertTrue(torch.equal(result2[1][0].cpu(), tensor))\n 
self.assertTrue(torch.equal(result2[1][1].cpu(), tensor))\n+ self.assertEqual(result2[2], 1)\n \n- result2 = send_to_device({\"a\": tensor, \"b\": [tensor, tensor]}, device)\n+ result2 = send_to_device({\"a\": tensor, \"b\": [tensor, tensor], \"c\": 1}, device)\n self.assertIsInstance(result2, dict)\n self.assertTrue(torch.equal(result2[\"a\"].cpu(), tensor))\n self.assertIsInstance(result2[\"b\"], list)\n self.assertTrue(torch.equal(result2[\"b\"][0].cpu(), tensor))\n self.assertTrue(torch.equal(result2[\"b\"][1].cpu(), tensor))\n+ self.assertEqual(result2[\"c\"], 1)\n \n- result3 = send_to_device(TestNamedTuple(a=tensor, b=[tensor, tensor]), device)\n+ result3 = send_to_device(TestNamedTuple(a=tensor, b=[tensor, tensor], c=1), device)\n self.assertIsInstance(result3, TestNamedTuple)\n self.assertTrue(torch.equal(result3.a.cpu(), tensor))\n self.assertIsInstance(result3.b, list)\n self.assertTrue(torch.equal(result3.b[0].cpu(), tensor))\n self.assertTrue(torch.equal(result3.b[1].cpu(), tensor))\n+ self.assertEqual(result3.c, 1)\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/177", "pr_id": 745739393 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 582066414..73f8ecc0b 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import math\n from typing import List, Optional, Union\n \n import torch\n@@ -388,6 +389,14 @@ def __iter__(self):\n xm.mark_step()\n yield slice_tensors(batch, data_slice)\n \n+ def __len__(self):\n+ state = AcceleratorState()\n+ whole_length = super().__len__()\n+ if self.drop_last:\n+ return whole_length // state.num_processes\n+ else:\n+ return math.ceil(whole_length / state.num_processes)\n+\n \n def prepare_data_loader(\n dataloader: DataLoader,\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/175", "pr_id": 743530149 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 8df05578a..e25b80c49 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -255,7 +255,7 @@ def _convert_to_fp32(tensor):\n def _is_fp16_tensor(tensor):\n return hasattr(tensor, \"dtype\") and tensor.dtype == torch.float16\n \n- return recursively_apply(_is_fp16_tensor, tensor, test_type=_is_fp16_tensor)\n+ return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_tensor)\n \n \n def convert_outputs_to_fp32(model_forward):\n", "code_comments": [], "context": [ { "body": "Thanks for fixing this!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/173", "pr_id": 742813523 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 03e38baf8..582066414 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -12,7 +12,6 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import warnings\n from typing import List, Optional, Union\n \n import torch\n@@ -228,8 +227,8 @@ def __init__(\n process_index: int = 0,\n split_batches: bool = False,\n ):\n- if split_batches and batch_size % num_processes != 0:\n- warnings.warn(\n+ if split_batches and batch_size > 1 and batch_size % num_processes != 0:\n+ raise ValueError(\n f\"To use `IterableDatasetShard` in `split_batches` 
mode, the batch size ({batch_size}) \"\n f\"needs to be a round multiple of the number of processes ({num_processes}).\"\n )\n@@ -466,9 +465,9 @@ def prepare_data_loader(\n process_index = state.process_index\n \n # Sanity check\n- if split_batches and dataloader.batch_size % num_processes != 0:\n- warnings.warn(\n- f\"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({dataloader.batch_size}) \"\n+ if split_batches and dataloader.batch_size > 1 and dataloader.batch_size % num_processes != 0:\n+ raise ValueError(\n+ f\"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) \"\n f\"needs to be a round multiple of the number of processes ({num_processes}).\"\n )\n \n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/170", "pr_id": 739375515 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex a70ebdaba..ac79d3e0a 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -82,9 +82,10 @@ class Accelerator:\n \n Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n >= 1.6.\n- dispatch_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ dispatch_batches (:obj:`bool`, `optional`):\n If set to :obj:`True`, the dataloader prepared by the Accelerator is only iterated through on the main\n- process and then the batches are split and broadcast to each process.\n+ process and then the batches are split and broadcast to each process. Will default to :obj:`True` for\n+ :obj:`DataLoader` whose underlying dataset is an :obj:`IterableDataset`, :obj:`False` otherwise.\n kwargs_handlers (list of kwargs handlers, `optional`)\n A list of :obj:`KwargHandler` to customize how the objects related to distributed training or mixed\n precision are created. See :doc:`kwargs` for more information.\n@@ -103,7 +104,7 @@ def __init__(\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n- dispatch_batches: bool = False,\n+ dispatch_batches: Optional[bool] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n if deepspeed_plugin is None: # init from env variables\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 1d464c55f..03e38baf8 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -398,7 +398,7 @@ def prepare_data_loader(\n split_batches: bool = False,\n put_on_device: bool = False,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n- dispatch_batches: bool = False,\n+ dispatch_batches: Optional[bool] = None,\n ) -> DataLoader:\n \"\"\"\n Wraps a PyTorch :obj:`DataLoader` to generate batches for one of the processes only.\n@@ -441,9 +441,10 @@ def prepare_data_loader(\n - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler in\n your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n \n- dispatch_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ dispatch_batches (:obj:`bool`, `optional`):\n If set to :obj:`True`, the datalaoder prepared is only iterated through on the main process and then the\n- batches are split and broadcast to each process.\n+ batches are split and broadcast to each process. 
Will default to :obj:`True` when the underlying dataset is\n+ an :obj:`IterableDataset`, :obj:`False` otherwise.\n \n Returns:\n :obj:`torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n@@ -452,6 +453,9 @@ def prepare_data_loader(\n \n This does not support :obj:`BatchSampler` with varying batch size yet.\n \"\"\"\n+ if dispatch_batches is None:\n+ dispatch_batches = False if not put_on_device else isinstance(dataloader.dataset, IterableDataset)\n+\n if dispatch_batches and not put_on_device:\n raise ValueError(\"Using `dispatch_batches=True` requires `put_on_device=True`.\")\n # Grab defaults from AcceleratorState\n", "code_comments": [ { "body": "Clean!", "diff_hunk": "@@ -452,6 +453,9 @@ def prepare_data_loader(\n \n This does not support :obj:`BatchSampler` with varying batch size yet.\n \"\"\"\n+ if dispatch_batches is None:\n+ dispatch_batches = False if not put_on_device else isinstance(new_dataset, IterableDataset)", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/168", "pr_id": 737895559 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex a35f7e268..a70ebdaba 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -82,6 +82,9 @@ class Accelerator:\n \n Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n >= 1.6.\n+ dispatch_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ If set to :obj:`True`, the dataloader prepared by the Accelerator is only iterated through on the main\n+ process and then the batches are split and broadcast to each process.\n kwargs_handlers (list of kwargs handlers, `optional`)\n A list of :obj:`KwargHandler` to customize how the objects related to distributed training or mixed\n precision are created. 
See :doc:`kwargs` for more information.\n@@ -100,6 +103,7 @@ def __init__(\n cpu: bool = False,\n deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n+ dispatch_batches: bool = False,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n if deepspeed_plugin is None: # init from env variables\n@@ -113,6 +117,7 @@ def __init__(\n \n self.device_placement = device_placement\n self.split_batches = split_batches\n+ self.dispatch_batches = dispatch_batches\n \n # Kwargs handlers\n self.ddp_handler = None\n@@ -384,6 +389,7 @@ def prepare_data_loader(self, data_loader):\n split_batches=self.split_batches,\n put_on_device=self.device_placement,\n rng_types=self.rng_types.copy(),\n+ dispatch_batches=self.dispatch_batches,\n )\n \n def prepare_optimizer(self, optimizer):\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 96648ca65..fd10140b6 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -20,7 +20,18 @@\n from packaging import version\n \n from .state import AcceleratorState, DistributedType, is_tpu_available\n-from .utils import RNGType, send_to_device, synchronize_rng_states\n+from .utils import (\n+ RNGType,\n+ broadcast,\n+ broadcast_object_list,\n+ concatenate,\n+ find_batch_size,\n+ get_data_structure,\n+ initialize_tensors,\n+ send_to_device,\n+ slice_tensors,\n+ synchronize_rng_states,\n+)\n \n \n if is_tpu_available():\n@@ -294,6 +305,90 @@ def __iter__(self):\n yield batch if self.device is None else send_to_device(batch, self.device)\n \n \n+class DataLoaderDispatcher(DataLoader):\n+ \"\"\"\n+ Subclass of a PyTorch :obj:`DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each\n+ process their part of the batch.\n+\n+ Args:\n+ split_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ Whether the resulting :obj:`DataLoader` should split the batches of the original data loader across devices\n+ or yield full batches (in which case it will yield batches starting at the :obj:`process_index`-th and\n+ advancing of :obj:`num_processes` batches at each iteration).\n+\n+ Another way to see this is that the observed batch size will be the same as the initial :obj:`dataloader`\n+ if this option is set to :obj:`True`, the batch size of the initial :obj:`dataloader` multiplied by\n+ :obj:`num_processes` otherwise.\n+\n+ Setting this option to :obj:`True` requires that the batch size of the :obj:`dataloader` is a round\n+ multiple of :obj:`batch_size`.\n+ \"\"\"\n+\n+ def __init__(self, dataset, split_batches: bool = False, **kwargs):\n+ super().__init__(dataset, **kwargs)\n+ self.split_batches = split_batches\n+\n+ def __iter__(self):\n+ state = AcceleratorState()\n+ if state.process_index == 0:\n+ # We only iterate through the DataLoader on process 0.\n+ main_iterator = super().__iter__()\n+ stop_iteration = False\n+ while not stop_iteration:\n+ # On process 0, we gather the batch to dispatch.\n+ if state.process_index == 0:\n+ try:\n+ if self.split_batches:\n+ # One batch of the main iterator is dispatched and split.\n+ batch = next(main_iterator)\n+ else:\n+ # num_processes batches of the main iterator are concatenated then dispatched and split.\n+ # We add the batches one by one so we have the remainder available when drop_last=False.\n+ batches = []\n+ for _ in range(state.num_processes):\n+ batches.append(next(main_iterator))\n+ batch = concatenate(batches, dim=0)\n+ # In both cases, we need to get the structure of the batch that 
we will broadcast on other\n+ # processes to initialize the tensors with the right shape.\n+ # data_structure, stop_iteration\n+ batch_info = [get_data_structure(batch), False]\n+ except StopIteration:\n+ batch_info = [None, True]\n+ else:\n+ batch_info = [None, stop_iteration]\n+\n+ # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.\n+ broadcast_object_list(batch_info)\n+ stop_iteration = batch_info[1]\n+ if stop_iteration:\n+ # If drop_last is False and split_batches is False, we may have a remainder to take care of.\n+ if not self.split_batches and not self.drop_last:\n+ if state.process_index == 0 and len(batches) > 0:\n+ batch = concatenate(batches, dim=0)\n+ batch_info = [get_data_structure(batch), False]\n+ else:\n+ batch_info = [None, True]\n+ broadcast_object_list(batch_info)\n+ if batch_info[1]:\n+ continue\n+ else:\n+ continue\n+\n+ if state.process_index != 0:\n+ # Initialize tensors on other processes than process 0.\n+ batch = initialize_tensors(batch_info[0])\n+ batch = send_to_device(batch, state.device)\n+ # Broadcast the batch before splitting it.\n+ batch = broadcast(batch, from_process=0)\n+\n+ batch_size = find_batch_size(batch) // state.num_processes\n+ data_slice = slice(state.process_index * batch_size, (state.process_index + 1) * batch_size)\n+\n+ if state.distributed_type == DistributedType.TPU:\n+ xm.mark_step()\n+ yield slice_tensors(batch, data_slice)\n+\n+\n def prepare_data_loader(\n dataloader: DataLoader,\n device: Optional[torch.device] = None,\n@@ -302,6 +397,7 @@ def prepare_data_loader(\n split_batches: bool = False,\n put_on_device: bool = False,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n+ dispatch_batches: bool = False,\n ) -> DataLoader:\n \"\"\"\n Wraps a PyTorch :obj:`DataLoader` to generate batches for one of the processes only.\n@@ -344,6 +440,10 @@ def prepare_data_loader(\n - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler in\n your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n \n+ dispatch_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ If set to :obj:`True`, the datalaoder prepared is only iterated through on the main process and then the\n+ batches are split and broadcast to each process.\n+\n Returns:\n :obj:`torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n \n@@ -351,6 +451,8 @@ def prepare_data_loader(\n \n This does not support :obj:`BatchSampler` with varying batch size yet.\n \"\"\"\n+ if dispatch_batches and not put_on_device:\n+ raise ValueError(\"Using `dispatch_batches=True` requires `put_on_device=True`.\")\n # Grab defaults from AcceleratorState\n state = AcceleratorState()\n if num_processes is None:\n@@ -370,7 +472,7 @@ def prepare_data_loader(\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n generator = getattr(dataloader, \"generator\", None)\n # No change if no multiprocess\n- if num_processes != 1:\n+ if num_processes != 1 and not dispatch_batches:\n if isinstance(new_dataset, IterableDataset):\n if getattr(dataloader.dataset, \"generator\", None) is not None:\n generator = dataloader.dataset.generator\n@@ -421,6 +523,11 @@ def prepare_data_loader(\n if new_batch_sampler is None:\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\n \n+ if dispatch_batches:\n+ return 
DataLoaderDispatcher(\n+ new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, **kwargs\n+ )\n+\n return DataLoaderShard(\n new_dataset,\n device=device if put_on_device else None,\ndiff --git a/src/accelerate/test_utils/test_script.py b/src/accelerate/test_utils/test_script.py\nindex 2e5095533..41efcf61c 100644\n--- a/src/accelerate/test_utils/test_script.py\n+++ b/src/accelerate/test_utils/test_script.py\n@@ -36,14 +36,14 @@ def init_state_check():\n def rng_sync_check():\n state = AcceleratorState()\n synchronize_rng_states([\"torch\"])\n- assert are_the_same_tensors(torch.get_rng_state())\n+ assert are_the_same_tensors(torch.get_rng_state()), \"RNG states improperly synchronized on CPU.\"\n if state.distributed_type == DistributedType.MULTI_GPU:\n synchronize_rng_states([\"cuda\"])\n- assert are_the_same_tensors(torch.cuda.get_rng_state())\n+ assert are_the_same_tensors(torch.cuda.get_rng_state()), \"RNG states improperly synchronized on GPU.\"\n if version.parse(torch.__version__) >= version.parse(\"1.6.0\"):\n generator = torch.Generator()\n synchronize_rng_states([\"generator\"], generator=generator)\n- assert are_the_same_tensors(generator.get_state())\n+ assert are_the_same_tensors(generator.get_state()), \"RNG states improperly synchronized in generator.\"\n \n if state.local_process_index == 0:\n print(\"All rng are properly synched.\")\n@@ -59,7 +59,7 @@ def dl_preparation_check():\n for batch in dl:\n result.append(gather(batch))\n result = torch.cat(result)\n- assert torch.equal(result.cpu(), torch.arange(0, length).long())\n+ assert torch.equal(result.cpu(), torch.arange(0, length).long()), \"Wrong non-shuffled dataloader result.\"\n \n dl = DataLoader(range(length), batch_size=8)\n dl = prepare_data_loader(\n@@ -74,7 +74,7 @@ def dl_preparation_check():\n for batch in dl:\n result.append(gather(batch))\n result = torch.cat(result)\n- assert torch.equal(result.cpu(), torch.arange(0, length).long())\n+ assert torch.equal(result.cpu(), torch.arange(0, length).long()), \"Wrong non-shuffled dataloader result.\"\n \n if state.process_index == 0:\n print(\"Non-shuffled dataloader passing.\")\n@@ -86,7 +86,7 @@ def dl_preparation_check():\n result.append(gather(batch))\n result = torch.cat(result).tolist()\n result.sort()\n- assert result == list(range(length))\n+ assert result == list(range(length)), \"Wrong shuffled dataloader result.\"\n \n dl = DataLoader(range(length), batch_size=8, shuffle=True)\n dl = prepare_data_loader(\n@@ -102,12 +102,77 @@ def dl_preparation_check():\n result.append(gather(batch))\n result = torch.cat(result).tolist()\n result.sort()\n- assert result == list(range(length))\n+ assert result == list(range(length)), \"Wrong shuffled dataloader result.\"\n \n if state.local_process_index == 0:\n print(\"Shuffled dataloader passing.\")\n \n \n+def central_dl_preparation_check():\n+ state = AcceleratorState()\n+ length = 32 * state.num_processes\n+\n+ dl = DataLoader(range(length), batch_size=8)\n+ dl = prepare_data_loader(\n+ dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True\n+ )\n+ result = []\n+ for batch in dl:\n+ result.append(gather(batch))\n+ result = torch.cat(result)\n+ assert torch.equal(result.cpu(), torch.arange(0, length).long()), \"Wrong non-shuffled dataloader result.\"\n+\n+ dl = DataLoader(range(length), batch_size=8)\n+ dl = prepare_data_loader(\n+ dl,\n+ state.device,\n+ state.num_processes,\n+ state.process_index,\n+ put_on_device=True,\n+ split_batches=True,\n+ 
dispatch_batches=True,\n+ )\n+ result = []\n+ for batch in dl:\n+ result.append(gather(batch))\n+ result = torch.cat(result)\n+ assert torch.equal(result.cpu(), torch.arange(0, length).long()), \"Wrong non-shuffled dataloader result.\"\n+\n+ if state.process_index == 0:\n+ print(\"Non-shuffled central dataloader passing.\")\n+\n+ dl = DataLoader(range(length), batch_size=8, shuffle=True)\n+ dl = prepare_data_loader(\n+ dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True\n+ )\n+ result = []\n+ for batch in dl:\n+ result.append(gather(batch))\n+ result = torch.cat(result).tolist()\n+ result.sort()\n+ assert result == list(range(length)), \"Wrong shuffled dataloader result.\"\n+\n+ dl = DataLoader(range(length), batch_size=8, shuffle=True)\n+ dl = prepare_data_loader(\n+ dl,\n+ state.device,\n+ state.num_processes,\n+ state.process_index,\n+ put_on_device=True,\n+ split_batches=True,\n+ dispatch_batches=True,\n+ )\n+ result = []\n+ for batch in dl:\n+ result.append(gather(batch))\n+ result = torch.cat(result).tolist()\n+ result.sort()\n+ assert result == list(range(length)), \"Wrong shuffled dataloader result.\"\n+\n+ if state.local_process_index == 0:\n+ print(\"Shuffled central dataloader passing.\")\n+\n+\n def mock_training(length, batch_size, generator):\n set_seed(42)\n generator.manual_seed(42)\n@@ -132,8 +197,8 @@ def training_check():\n length = batch_size * 4 * state.num_processes\n \n train_set, old_model = mock_training(length, batch_size * state.num_processes, generator)\n- assert are_the_same_tensors(old_model.a)\n- assert are_the_same_tensors(old_model.b)\n+ assert are_the_same_tensors(old_model.a), \"Did not obtain the same model on both processes.\"\n+ assert are_the_same_tensors(old_model.b), \"Did not obtain the same model on both processes.\"\n \n accelerator = Accelerator()\n train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n@@ -152,8 +217,8 @@ def training_check():\n optimizer.step()\n \n model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a)\n- assert torch.allclose(old_model.b, model.b)\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n accelerator.print(\"Training yielded the same results on one CPU or distributed setup with no batch split.\")\n \n@@ -174,8 +239,8 @@ def training_check():\n optimizer.step()\n \n model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a)\n- assert torch.allclose(old_model.b, model.b)\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n accelerator.print(\"Training yielded the same results on one CPU or distributes setup with batch split.\")\n \n@@ -197,8 +262,8 @@ def training_check():\n optimizer.step()\n \n model = accelerator.unwrap_model(model).cpu()\n- assert torch.allclose(old_model.a, model.a)\n- assert torch.allclose(old_model.b, model.b)\n+ assert torch.allclose(old_model.a, model.a), \"Did not obtain the same model on CPU or distributed training.\"\n+ assert torch.allclose(old_model.b, model.b), \"Did not obtain the same model on CPU or distributed training.\"\n \n \n def main():\n@@ -215,6 +280,7 @@ def 
main():\n if state.local_process_index == 0:\n print(\"\\n**DataLoader integration test**\")\n dl_preparation_check()\n+ central_dl_preparation_check()\n \n if state.local_process_index == 0:\n print(\"\\n**Training integration test**\")\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 026ab9501..8df05578a 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -48,6 +48,12 @@ class RNGType(Enum):\n GENERATOR = \"generator\"\n \n \n+@dataclass\n+class TensorInformation:\n+ shape: torch.Size\n+ dtype: torch.dtype\n+\n+\n def set_seed(seed: int):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.\n@@ -115,6 +121,66 @@ def honor_type(obj, generator):\n return type(obj)(generator)\n \n \n+def is_torch_tensor(tensor):\n+ return isinstance(tensor, torch.Tensor)\n+\n+\n+def is_tensor_information(tensor_info):\n+ return isinstance(tensor_info, TensorInformation)\n+\n+\n+def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):\n+ \"\"\"\n+ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.\n+\n+ Args:\n+ func (:obj:`callable`):\n+ The function to recursively apply.\n+ data (nested list/tuple/dictionary of :obj:`main_type`):\n+ The data on which to apply :obj:`func`\n+ *args:\n+ Positional arguments that will be passed to :obj:`func` when applied on the unpacked data.\n+ main_type (:obj:`type`, `optional`, defaults to :obj:`torch.Tensor`):\n+ The base type of the objects to which apply :obj:`func`.\n+ error_on_other_type (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ Whether to return an error or not if after unpacking :obj:`data`, we get on an object that is not of type\n+ :obj:`main_type`. 
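The `recursively_apply` helper documented above generalizes the nested traversal that `send_to_device` and the gather utilities reuse later in this diff. A small usage sketch (assuming the helper is importable from `accelerate.utils` once the diff is merged; the batch contents are made up):

```python
# Apply a function to every tensor of a nested list/tuple/dict structure while
# preserving that structure, mirroring how send_to_device is rewritten in this diff.
import torch
from accelerate.utils import recursively_apply

batch = {
    "input_ids": torch.ones(2, 4, dtype=torch.long),
    "labels": [torch.zeros(2), torch.ones(2)],
}


def _halve(tensor):
    return tensor.float() / 2


halved = recursively_apply(_halve, batch)
# `halved` has the same dict/list layout as `batch`; only the tensors were transformed.
```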
If :obj:`False`, the function will leave objects of types different than :obj:`main_type`\n+ unchanged.\n+ **kwargs:\n+ Keyword arguments that will be passed to :obj:`func` when applied on the unpacked data.\n+\n+ Returns:\n+ The same data structure as :obj:`data` with :obj:`func` applied to every object of type :obj:`main_type`.\n+ \"\"\"\n+ if isinstance(data, (tuple, list)):\n+ return honor_type(\n+ data,\n+ (\n+ recursively_apply(\n+ func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n+ )\n+ for o in data\n+ ),\n+ )\n+ elif isinstance(data, dict):\n+ return type(data)(\n+ **{\n+ k: recursively_apply(\n+ func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs\n+ )\n+ for k, v in data.items()\n+ }\n+ )\n+ elif test_type(data):\n+ return func(data, *args, **kwargs)\n+ elif error_on_other_type:\n+ raise TypeError(\n+ f\"Can't apply {func.__name__} on object of type {type(data)}, only of nested list/tuple/dicts of objects \"\n+ f\"that satisfy {test_type.__name__}.\"\n+ )\n+ return data\n+\n+\n def send_to_device(tensor, device):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.\n@@ -128,18 +194,52 @@ def send_to_device(tensor, device):\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n \"\"\"\n- if isinstance(tensor, (list, tuple)):\n- return honor_type(tensor, (send_to_device(t, device) for t in tensor))\n- elif isinstance(tensor, dict):\n- return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})\n- elif not hasattr(tensor, \"to\"):\n- return tensor\n- return tensor.to(device)\n+\n+ def _send_to_device(t, device):\n+ return t.to(device)\n+\n+ def _has_to_method(t):\n+ return hasattr(t, \"to\")\n+\n+ return recursively_apply(_send_to_device, tensor, device, test_type=_has_to_method, error_on_other_type=True)\n+\n+\n+def get_data_structure(data):\n+ \"\"\"\n+ Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.\n+\n+ Args:\n+ data (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ The data to send to analyze.\n+\n+ Returns:\n+ The same data structure as :obj:`data` with :class:`~accelerate.utils.TensorInformation` instead of tensors.\n+ \"\"\"\n+\n+ def _get_data_structure(tensor):\n+ return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)\n+\n+ return recursively_apply(_get_data_structure, data)\n+\n+\n+def initialize_tensors(data_structure):\n+ \"\"\"\n+ Recursively initializes tensors from a nested list/tuple/dictionary of\n+ :class:`~accelerate.utils.TensorInformation`.\n+\n+ Returns:\n+ The same data structure as :obj:`data` with tensors instead of :class:`~accelerate.utils.TensorInformation`.\n+ \"\"\"\n+\n+ def _initialize_tensor(tensor_info):\n+ return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)\n+\n+ return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)\n \n \n def convert_to_fp32(tensor):\n \"\"\"\n- Recursively converts the lements nested list/tuple/dictionary of tensors in FP16 precision to FP32.\n+ Recursively converts the elements nested list/tuple/dictionary of tensors in FP16 precision to FP32.\n \n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n@@ -148,13 +248,14 @@ def convert_to_fp32(tensor):\n Returns:\n The same data structure as :obj:`tensor` with all tensors that were in FP16 precision converted to FP32.\n \"\"\"\n- if isinstance(tensor, 
(list, tuple)):\n- return honor_type(tensor, (convert_to_fp32(t) for t in tensor))\n- elif isinstance(tensor, dict):\n- return type(tensor)(**{k: convert_to_fp32(v) for k, v in tensor.items()})\n- elif not hasattr(tensor, \"dtype\") or tensor.dtype != torch.float16:\n- return tensor\n- return tensor.float()\n+\n+ def _convert_to_fp32(tensor):\n+ return tensor.float()\n+\n+ def _is_fp16_tensor(tensor):\n+ return hasattr(tensor, \"dtype\") and tensor.dtype == torch.float16\n+\n+ return recursively_apply(_is_fp16_tensor, tensor, test_type=_is_fp16_tensor)\n \n \n def convert_outputs_to_fp32(model_forward):\n@@ -196,7 +297,7 @@ def extract_model_from_parallel(model):\n return model\n \n \n-def _tpu_gather(tensor, name=\"tensor\"):\n+def _tpu_gather(tensor, name=\"gather tensor\"):\n if isinstance(tensor, (list, tuple)):\n return honor_type(tensor, (_tpu_gather(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor)))\n elif isinstance(tensor, dict):\n@@ -209,17 +310,14 @@ def _tpu_gather(tensor, name=\"tensor\"):\n \n \n def _gpu_gather(tensor):\n- if isinstance(tensor, (list, tuple)):\n- return honor_type(tensor, (_gpu_gather(t) for t in tensor))\n- elif isinstance(tensor, dict):\n- return type(tensor)({k: _gpu_gather(v) for k, v in tensor.items()})\n- elif not isinstance(tensor, torch.Tensor):\n- raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n- if tensor.ndim == 0:\n- tensor = tensor.clone()[None]\n- output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]\n- torch.distributed.all_gather(output_tensors, tensor)\n- return torch.cat(output_tensors, dim=0)\n+ def _gpu_gather_one(tensor):\n+ if tensor.ndim == 0:\n+ tensor = tensor.clone()[None]\n+ output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]\n+ torch.distributed.all_gather(output_tensors, tensor)\n+ return torch.cat(output_tensors, dim=0)\n+\n+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)\n \n \n _cpu_gather = _gpu_gather\n@@ -246,6 +344,130 @@ def gather(tensor):\n return tensor\n \n \n+def _gpu_broadcast(data, src=0):\n+ def _gpu_broadcast_one(tensor, src=0):\n+ torch.distributed.broadcast(tensor, src=src)\n+ return tensor\n+\n+ return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)\n+\n+\n+def _tpu_broadcast(tensor, src=0, name=\"broadcast tensor\"):\n+ if isinstance(tensor, (list, tuple)):\n+ return honor_type(tensor, (_tpu_broadcast(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor)))\n+ elif isinstance(tensor, dict):\n+ return type(tensor)({k: _tpu_broadcast(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n+ return xm.mesh_reduce(name, tensor, lambda x: x[src])\n+\n+\n+def broadcast(tensor, from_process: int = 0):\n+ \"\"\"\n+ Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.\n+\n+ Args:\n+ tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ The data to gather.\n+ from_process (:obj:`int`, `optional`, defaults to 0):\n+ The process from which to send the data\n+\n+ Returns:\n+ The same data structure as :obj:`tensor` with all tensors broadcasted to the proper device.\n+ \"\"\"\n+ if AcceleratorState().distributed_type == DistributedType.TPU:\n+ return _tpu_broadcast(tensor, src=from_process, name=\"accelerate.utils.broadcast\")\n+ elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n+ return _gpu_broadcast(tensor, src=from_process)\n+ elif 
AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n+ return _gpu_broadcast(tensor, src=from_process)\n+ else:\n+ return tensor\n+\n+\n+def broadcast_object_list(object_list, from_process: int = 0):\n+ \"\"\"\n+ Broadcast a list of picklable objects form one process to the others.\n+\n+ Args:\n+ object_list (list of picklable objects):\n+ The list of objects to broadcast. This list will be modified inplace.\n+ from_process (:obj:`int`, `optional`, defaults to 0):\n+ The process from which to send the data.\n+\n+ Returns:\n+ The same list containing the objects from process 0.\n+ \"\"\"\n+ if AcceleratorState().distributed_type == DistributedType.TPU:\n+ for i, obj in enumerate(object_list):\n+ object_list[i] = xm.mesh_reduce(\"accelerate.utils.broadcast_object_list\", obj, lambda x: x[from_process])\n+ elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n+ torch.distributed.broadcast_object_list(object_list, src=from_process)\n+ elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n+ torch.distributed.broadcast_object_list(object_list, src=from_process)\n+ return object_list\n+\n+\n+def slice_tensors(data, tensor_slice):\n+ \"\"\"\n+ Recursively takes a slice in a nested list/tuple/dictionary of tensors.\n+\n+ Args:\n+ data (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ The data to slice.\n+ tensor_slice (:obj:`slice`):\n+ The slice to take.\n+\n+ Returns:\n+ The same data structure as :obj:`data` with all the tensors slices.\n+ \"\"\"\n+\n+ def _slice_tensor(tensor, tensor_slice):\n+ return tensor[tensor_slice]\n+\n+ return recursively_apply(_slice_tensor, data, tensor_slice)\n+\n+\n+def find_batch_size(data):\n+ \"\"\"\n+ Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.\n+\n+ Args:\n+ data (nested list/tuple/dictionary of :obj:`torch.Tensor`): The data from which to find the batch size.\n+\n+ Returns:\n+ :obj:`int`: The batch size.\n+ \"\"\"\n+ if isinstance(data, (tuple, list)):\n+ return find_batch_size(data[0])\n+ elif isinstance(data, dict):\n+ for k in data.keys():\n+ return find_batch_size(data[k])\n+ elif not isinstance(data, torch.Tensor):\n+ raise TypeError(f\"Can only find the batch size of tensors but got {type(data)}.\")\n+ return data.shape[0]\n+\n+\n+def concatenate(data, dim=0):\n+ \"\"\"\n+ Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.\n+\n+ Args:\n+ data (nested list/tuple/dictionary of lists of tensors :obj:`torch.Tensor`):\n+ The data to concatenate.\n+ dim (:obj:`int`, `optional`, defaults to 0):\n+ The dimension on which to concatenate.\n+\n+ Returns:\n+ The same data structure as :obj:`data` with all the tensors concatenated.\n+ \"\"\"\n+ if isinstance(data[0], (tuple, list)):\n+ return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))\n+ elif isinstance(data[0], dict):\n+ return type(data[0])(**{k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n+ elif not isinstance(data[0], torch.Tensor):\n+ raise TypeError(f\"Can only concatenate tensors but got {type(data[0])}\")\n+ return torch.cat(data, dim=dim)\n+\n+\n def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n \"\"\"\n Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they\n@@ -261,36 +483,35 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n pad_first (:obj:`bool`, `optional`, 
defaults to :obj:`False`):\n Whether to pad at the beginning or the end.\n \"\"\"\n- if isinstance(tensor, (list, tuple)):\n- return honor_type(tensor, (pad_across_processes(t, dim=dim, pad_index=pad_index) for t in tensor))\n- elif isinstance(tensor, dict):\n- return type(tensor)({k: pad_across_processes(v, dim=dim, pad_index=pad_index) for k, v in tensor.items()})\n- elif not isinstance(tensor, torch.Tensor):\n- raise TypeError(f\"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n \n- if dim >= len(tensor.shape):\n- return tensor\n-\n- # Gather all sizes\n- size = torch.tensor(tensor.shape, device=tensor.device)[None]\n- sizes = gather(size).cpu()\n- # Then pad to the maximum size\n- max_size = max(s[dim] for s in sizes)\n- if max_size == tensor.shape[dim]:\n- return tensor\n-\n- old_size = tensor.shape\n- new_size = list(old_size)\n- new_size[dim] = max_size\n- new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index\n- if pad_first:\n- indices = tuple(\n- slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))\n- )\n- else:\n- indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))\n- new_tensor[indices] = tensor\n- return new_tensor\n+ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n+ if dim >= len(tensor.shape):\n+ return tensor\n+\n+ # Gather all sizes\n+ size = torch.tensor(tensor.shape, device=tensor.device)[None]\n+ sizes = gather(size).cpu()\n+ # Then pad to the maximum size\n+ max_size = max(s[dim] for s in sizes)\n+ if max_size == tensor.shape[dim]:\n+ return tensor\n+\n+ old_size = tensor.shape\n+ new_size = list(old_size)\n+ new_size[dim] = max_size\n+ new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index\n+ if pad_first:\n+ indices = tuple(\n+ slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))\n+ )\n+ else:\n+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))\n+ new_tensor[indices] = tensor\n+ return new_tensor\n+\n+ return recursively_apply(\n+ _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first\n+ )\n \n \n def wait_for_everyone():\n", "code_comments": [ { "body": "```suggestion\r\n If set to :obj:`True`, the dataloader prepared by the Accelerator is only iterated through on the main\r\n```", "diff_hunk": "@@ -82,6 +82,9 @@ class Accelerator:\n \n Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n >= 1.6.\n+ central_dataloader (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ If set to :obj:`True`, the datalaoder prepared by the Accelerator is only iterated through on the main", "from_author": false }, { "body": "I don't like `central` as I don't really understand what it means from a quick read, but I spent 10 minutes trying to find a better name without success, so fine to leave it like it is :)", "diff_hunk": "@@ -82,6 +82,9 @@ class Accelerator:\n \n Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n >= 1.6.\n+ central_dataloader (:obj:`bool`, `optional`, defaults to :obj:`False`):", "from_author": false }, { "body": "What is this `my_batch_size` used for?", "diff_hunk": "@@ -421,6 +524,13 @@ def prepare_data_loader(\n if new_batch_sampler is None:\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if 
split_batches else dataloader.batch_size\n \n+ if central_dataloader:\n+ data_loader = DataLoaderDispatcher(\n+ new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, **kwargs\n+ )\n+ data_loader.my_batch_size = dataloader.batch_size", "from_author": false }, { "body": "Please add an error message if it fails :)", "diff_hunk": "@@ -108,6 +108,71 @@ def dl_preparation_check():\n print(\"Shuffled dataloader passing.\")\n \n \n+def central_dl_preparation_check():\n+ state = AcceleratorState()\n+ length = 32 * state.num_processes\n+\n+ dl = DataLoader(range(length), batch_size=8)\n+ dl = prepare_data_loader(\n+ dl, state.device, state.num_processes, state.process_index, put_on_device=True, central_dataloader=True\n+ )\n+ result = []\n+ for batch in dl:\n+ result.append(gather(batch))\n+ result = torch.cat(result)\n+ assert torch.equal(result.cpu(), torch.arange(0, length).long())", "from_author": false }, { "body": "Oh actually this applies to all `assert` in this file!", "diff_hunk": "@@ -108,6 +108,71 @@ def dl_preparation_check():\n print(\"Shuffled dataloader passing.\")\n \n \n+def central_dl_preparation_check():\n+ state = AcceleratorState()\n+ length = 32 * state.num_processes\n+\n+ dl = DataLoader(range(length), batch_size=8)\n+ dl = prepare_data_loader(\n+ dl, state.device, state.num_processes, state.process_index, put_on_device=True, central_dataloader=True\n+ )\n+ result = []\n+ for batch in dl:\n+ result.append(gather(batch))\n+ result = torch.cat(result)\n+ assert torch.equal(result.cpu(), torch.arange(0, length).long())", "from_author": false }, { "body": "Woopsie, this is some leftover to clean up! Thanks for flagging!", "diff_hunk": "@@ -421,6 +524,13 @@ def prepare_data_loader(\n if new_batch_sampler is None:\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\n \n+ if central_dataloader:\n+ data_loader = DataLoaderDispatcher(\n+ new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, **kwargs\n+ )\n+ data_loader.my_batch_size = dataloader.batch_size", "from_author": true }, { "body": "This is just a test script ;-)", "diff_hunk": "@@ -108,6 +108,71 @@ def dl_preparation_check():\n print(\"Shuffled dataloader passing.\")\n \n \n+def central_dl_preparation_check():\n+ state = AcceleratorState()\n+ length = 32 * state.num_processes\n+\n+ dl = DataLoader(range(length), batch_size=8)\n+ dl = prepare_data_loader(\n+ dl, state.device, state.num_processes, state.process_index, put_on_device=True, central_dataloader=True\n+ )\n+ result = []\n+ for batch in dl:\n+ result.append(gather(batch))\n+ result = torch.cat(result)\n+ assert torch.equal(result.cpu(), torch.arange(0, length).long())", "from_author": true }, { "body": "Let's go for `dispath_batches` to look like `split_batches`", "diff_hunk": "@@ -82,6 +82,9 @@ class Accelerator:\n \n Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n >= 1.6.\n+ central_dataloader (:obj:`bool`, `optional`, defaults to :obj:`False`):", "from_author": true } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/164", "pr_id": 734968636 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex d81e4ab61..ca49adf45 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -203,6 +203,8 @@ def _tpu_gather(tensor, name=\"tensor\"):\n return type(tensor)({k: _tpu_gather(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n 
elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n+ if tensor.ndim == 0:\n+ tensor = tensor.clone()[None]\n return xm.mesh_reduce(name, tensor, torch.cat)\n \n \n@@ -213,6 +215,8 @@ def _gpu_gather(tensor):\n return type(tensor)({k: _gpu_gather(v) for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n+ if tensor.ndim == 0:\n+ tensor = tensor.clone()[None]\n output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(output_tensors, tensor)\n return torch.cat(output_tensors, dim=0)\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/152", "pr_id": 722757010 }, { "diff": "diff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 3e9f5f470..585d2aa88 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -152,7 +152,7 @@ just run\n \n accelerate config\n \n-on your machine and reply to the questions asked. This will save a `default_config.json` file in your cache folder for\n+on your machine and reply to the questions asked. This will save a `default_config.yaml` file in your cache folder for\n πŸ€— Accelerate. That cache folder is (with decreasing order of priority):\n \n - The content of your environment variable ``HF_HOME`` suffixed with `accelerate`.\n@@ -176,7 +176,7 @@ Note that if you specified a location for the config file in the previous step,\n \n .. code-block:: bash\n \n- accelerate test --config_file path_to_config.json\n+ accelerate test --config_file path_to_config.yaml\n \n \n Now that this is done, you can run your script with the following command:\n@@ -190,7 +190,7 @@ If you stored the config file in a non-default location, you can indicate it to\n \n .. code-block:: bash\n \n- accelerate launch --config_file path_to_config.json path_to_script.py --args_for_the_script\n+ accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script\n \n You can also override any of the arguments determined by your config file, see TODO: insert ref here.\n \ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\nindex 48ea96ab1..5bafb3be2 100644\n--- a/src/accelerate/commands/config/__init__.py\n+++ b/src/accelerate/commands/config/__init__.py\n@@ -48,7 +48,7 @@ def config_command_parser(subparsers=None):\n \"--config_file\",\n default=None,\n help=(\n- \"The path to use to store the config file. Will default to a file named default_config.json in the cache \"\n+ \"The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache \"\n \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n \"with 'huggingface'.\"\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 55b019fa4..39c00c280 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -28,7 +28,7 @@\n os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n )\n cache_dir = os.path.join(hf_cache_home, \"accelerate\")\n-default_json_config_file = os.path.join(cache_dir, \"default_config.json\")\n+default_json_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n default_yaml_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n \n # For backward compatibility: the default config is the json one if it's the only existing file.\ndiff --git a/src/accelerate/commands/test.py b/src/accelerate/commands/test.py\nindex ec6d2524b..379233679 100644\n--- a/src/accelerate/commands/test.py\n+++ b/src/accelerate/commands/test.py\n@@ -30,7 +30,7 @@ def test_command_parser(subparsers=None):\n \"--config_file\",\n default=None,\n help=(\n- \"The path to use to store the config file. Will default to a file named default_config.json in the cache \"\n+ \"The path to use to store the config file. Will default to a file named default_config.yaml in the cache \"\n \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n \"with 'huggingface'.\"\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/151", "pr_id": 722375795 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex a6b7a7d74..c5238369c 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -388,4 +388,5 @@ def __post_init__(self):\n },\n },\n \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n+ \"zero_allow_untested_optimizer\": True,\n }\n", "code_comments": [], "context": [ { "body": "FYI, I'm not involved with accelerate's deepspeed integration. I suggested that it will be done in same way as transformers when it was first contributed, but it was still committed in a different way. Thus now the user has to use a completely different setup when switching from transformers and developers have to maintain something totally different. 
So I'm not participating in this particular endeavour.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/150", "pr_id": 722373435 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex a6b7a7d74..d81e4ab61 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -151,7 +151,7 @@ def convert_to_fp32(tensor):\n if isinstance(tensor, (list, tuple)):\n return honor_type(tensor, (convert_to_fp32(t) for t in tensor))\n elif isinstance(tensor, dict):\n- return type(tensor)({k: convert_to_fp32(v) for k, v in tensor.items()})\n+ return type(tensor)(**{k: convert_to_fp32(v) for k, v in tensor.items()})\n elif not hasattr(tensor, \"dtype\") or tensor.dtype != torch.float16:\n return tensor\n return tensor.float()\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/149", "pr_id": 721725679 }, { "diff": "diff --git a/docs/source/index.rst b/docs/source/index.rst\nindex 8707ed373..3b1281c84 100644\n--- a/docs/source/index.rst\n+++ b/docs/source/index.rst\n@@ -136,7 +136,7 @@ Supported integrations\n - multi-GPU on several nodes (machines)\n - TPU\n - FP16 with native AMP (apex on the roadmap)\n-- DeepSpeed (experimental)\n+- DeepSpeed (experimental support)\n \n .. toctree::\n :maxdepth: 2\ndiff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 68a23fff5..3e9f5f470 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -381,6 +381,22 @@ this:\n lr_scheduler.step()\n \n \n+DeepSpeed\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight\n+breaking changes. In particular, πŸ€— Accelerate does not support DeepSpeed config you have written yourself yet, this\n+will be added in a next version.\n+\n+One main caveat for the DeepSpeed integration is that the DeepSpeed launcher always passes a ``local_rank`` variable to\n+the training script, so your training script should accept it (whether you launch training with the DeepSpeed launcher\n+or ``accelerate launch``).\n+\n+.. Warning::\n+\n+ The :func:`~accelerate.notebook_launcher` does not support the DeepSpeed integration yet.\n+\n+\n Internal mechanism\n -----------------------------------------------------------------------------------------------------------------------\n \n", "code_comments": [ { "body": "```suggestion\r\nor ``accelerate launch``).\r\n```", "diff_hunk": "@@ -381,6 +381,22 @@ this:\n lr_scheduler.step()\n \n \n+DeepSpeed\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight\n+breaking changes. 
In particular, πŸ€— Accelerate does not support DeepSpeed config you have written yourself yet, this\n+will be added in a next version.\n+\n+One main caveat for the DeepSpeed integration is that the DeepSpeed launcher always passes a ``local_rank`` variable to\n+the training script, so your training script should accept it (whether you launch training with the DeepSpeed launcher\n+or ``accelerate launch`` Accelerate.", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/140", "pr_id": 707116659 }, { "diff": "diff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 7ccdf5daf..07035a427 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -358,6 +358,20 @@ softmax). However you might want to put your loss computation inside the `accele\n with accelerator.autocast():\n loss = complex_loss_function(outputs, target):\n \n+Another caveat with Mixed Precision training is that the gradient will skip a few updates at the beginning and\n+sometimes during training: because of the dynamic loss scaling strategy, there are points during training where the\n+gradients have overflown, and the loss scaling factor is reduced to avoid this happening again at the next step.\n+\n+This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may\n+have an impact when you have very little training data, or if the first learning rate values of your scheduler are very\n+important. In this case, you can skip the learning rate scheduler updates when the optimizer step was not done like\n+this:\n+\n+.. codeblock::\n+\n+ if not accelerator.optimizer_step_was_skipped:\n+ lr_scheduler.step()\n+\n \n Internal mechanism\n -----------------------------------------------------------------------------------------------------------------------\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex a11b7a7a5..a35f7e268 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -566,3 +566,14 @@ def autocast(self):\n autocast_context.__exit__()\n else:\n yield\n+\n+ @property\n+ def optimizer_step_was_skipped(self):\n+ \"\"\"\n+ Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which\n+ case the learning rate should not be changed.\n+ \"\"\"\n+ for optimizer in self._optimizers:\n+ if optimizer.is_overflow:\n+ return True\n+ return False\ndiff --git a/src/accelerate/deepspeed_utils.py b/src/accelerate/deepspeed_utils.py\nindex e52c0c030..07450a87e 100644\n--- a/src/accelerate/deepspeed_utils.py\n+++ b/src/accelerate/deepspeed_utils.py\n@@ -89,7 +89,7 @@ def step(self):\n \n @property\n def is_overflow(self):\n- \"\"\"This must be called before lr_scheduler.step() when using deepspeed with fp16\"\"\"\n+ \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n overflow = False\n if hasattr(self.optimizer, \"overflow\"):\n overflow = self.optimizer.overflow\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex b2909dbb5..37ca78833 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -53,6 +53,7 @@ def __init__(self, optimizer, device_placement=True, scaler=None):\n self.scaler = scaler\n self.state = AcceleratorState()\n self.device_placement = device_placement\n+ self._is_overflow = False\n \n # Handle device placement\n if device_placement:\n@@ -108,8 +109,12 @@ def step(self, 
closure=None):\n optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n+ scale_before = self.scaler.get_scale()\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n+ scale_after = self.scaler.get_scale()\n+ # If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow.\n+ self._is_overflow = scale_after < scale_before\n else:\n self.optimizer.step(closure)\n \n@@ -119,5 +124,5 @@ def _switch_parameters(self, parameters_map):\n \n @property\n def is_overflow(self):\n- \"\"\"This needs to be implemented at the end\"\"\"\n- return False # TODO: implement it\n+ \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n+ return self._is_overflow\n", "code_comments": [ { "body": "```suggestion\r\nsometimes during training: because of the dynamic loss scaling strategy, there are points during training where the\r\n```", "diff_hunk": "@@ -358,6 +358,20 @@ softmax). However you might want to put your loss computation inside the `accele\n with accelerator.autocast():\n loss = complex_loss_function(outputs, target):\n \n+Another caveat with Mixed Precision training is that the gradient will skip a few updates at the beginning and\n+sometimes during training: because of the dynamic loss scaling strategy, there are points at training where the", "from_author": false }, { "body": "```suggestion\r\nhave an impact when you have very little training data, or if the first learning rate values of your scheduler are very\r\n```", "diff_hunk": "@@ -358,6 +358,20 @@ softmax). However you might want to put your loss computation inside the `accele\n with accelerator.autocast():\n loss = complex_loss_function(outputs, target):\n \n+Another caveat with Mixed Precision training is that the gradient will skip a few updates at the beginning and\n+sometimes during training: because of the dynamic loss scaling strategy, there are points at training where the\n+gradients have overflown, and the loss scaling factor is reduced to avoid this happening again at the next step.\n+\n+This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may\n+have an impace when you have very little training data, or if the first learning rate values of your scheduler are very", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/139", "pr_id": 706448160 }, { "diff": "diff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 7ccdf5daf..2efdf6a82 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -251,6 +251,13 @@ To introduce special behavior in your script for TPUs you can check the :obj:`di\n The `NLP example <https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py>`__ shows an example in\n situation with dynamic padding.\n \n+One last thing to pay close attnetion to: if your model has tied weights (such as language models which tie the weights\n+of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you\n+passed your model to :meth:`~accelerate.Accelerator.prepare`) will break the tying. You will need to retie the weights\n+after. 
You can find an example of this in the `run_clm_no_trainer\n+<https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py>`__ script in\n+the Transformers repository.\n+\n \n Other caveats\n -----------------------------------------------------------------------------------------------------------------------\n@@ -345,6 +352,7 @@ If you are using gradient clipping in your script, you should replace the calls\n :obj:`torch.nn.utils.clip_grad_norm_` or :obj:`torch.nn.utils.clip_grad_value_` with :obj:`accelerator.clip_grad_norm_`\n and :obj:`accelerator.clip_grad_value_` respectively.\n \n+\n Mixed Precision training\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/138", "pr_id": 706392343 }, { "diff": "diff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 1e28fed85..7ccdf5daf 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -345,6 +345,19 @@ If you are using gradient clipping in your script, you should replace the calls\n :obj:`torch.nn.utils.clip_grad_norm_` or :obj:`torch.nn.utils.clip_grad_value_` with :obj:`accelerator.clip_grad_norm_`\n and :obj:`accelerator.clip_grad_value_` respectively.\n \n+Mixed Precision training\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+If you are running your training in Mixed Precision with Accelerate, you will get the best result with your loss being\n+computed inside your model (like in Transformer models for instance). Every computation outside of the model will be\n+executed in full precision (which is generally what you want for loss computation, expecially if it involves a\n+softmax). However you might want to put your loss computation inside the `accelerator.autocast` context manager:\n+\n+.. codeblock::\n+\n+ with accelerator.autocast():\n+ loss = complex_loss_function(outputs, target):\n+\n \n Internal mechanism\n -----------------------------------------------------------------------------------------------------------------------\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 281f4e9a7..a11b7a7a5 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -28,6 +28,7 @@\n from .utils import (\n DeepSpeedPlugin,\n RNGType,\n+ convert_outputs_to_fp32,\n extract_model_from_parallel,\n gather,\n pad_across_processes,\n@@ -295,6 +296,7 @@ def prepare_model(self, model):\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n+ model.forward = convert_outputs_to_fp32(model.forward)\n return model\n \n def _prepare_deepspeed(self, *args):\n@@ -550,3 +552,17 @@ def get_state_dict(self, model):\n state_dict[k] = state_dict[k].float()\n \n return state_dict\n+\n+ @contextmanager\n+ def autocast(self):\n+ \"\"\"\n+ Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. 
Nothing\n+ different will happen otherwise.\n+ \"\"\"\n+ if self.native_amp:\n+ autocast_context = torch.cuda.amp.autocast()\n+ autocast_context.__enter__()\n+ yield\n+ autocast_context.__exit__()\n+ else:\n+ yield\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 6edd1f1c0..a6b7a7d74 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -137,6 +137,46 @@ def send_to_device(tensor, device):\n return tensor.to(device)\n \n \n+def convert_to_fp32(tensor):\n+ \"\"\"\n+ Recursively converts the lements nested list/tuple/dictionary of tensors in FP16 precision to FP32.\n+\n+ Args:\n+ tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ The data to convert from FP16 to FP32.\n+\n+ Returns:\n+ The same data structure as :obj:`tensor` with all tensors that were in FP16 precision converted to FP32.\n+ \"\"\"\n+ if isinstance(tensor, (list, tuple)):\n+ return honor_type(tensor, (convert_to_fp32(t) for t in tensor))\n+ elif isinstance(tensor, dict):\n+ return type(tensor)({k: convert_to_fp32(v) for k, v in tensor.items()})\n+ elif not hasattr(tensor, \"dtype\") or tensor.dtype != torch.float16:\n+ return tensor\n+ return tensor.float()\n+\n+\n+def convert_outputs_to_fp32(model_forward):\n+ \"\"\"\n+ Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16\n+ precision will be convert back to FP32.\n+\n+ Args:\n+ model_forward (:obj:`Callable`):\n+ The function which outputs we want to treat.\n+\n+ Returns:\n+ The same function as :obj:`model_forward` but with converted outputs.\n+ \"\"\"\n+\n+ def convert_outputs(*args, **kwargs):\n+ outputs = model_forward(*args, **kwargs)\n+ return convert_to_fp32(outputs)\n+\n+ return convert_outputs\n+\n+\n def extract_model_from_parallel(model):\n \"\"\"\n Extract a model from its distributed containers.\n", "code_comments": [ { "body": "```suggestion\r\n Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing\r\n```", "diff_hunk": "@@ -550,3 +552,17 @@ def get_state_dict(self, model):\n state_dict[k] = state_dict[k].float()\n \n return state_dict\n+\n+ @contextmanager\n+ def autocast(self):\n+ \"\"\"\n+ Will apply automatic mixed precision inside the block inside thics context manager, if it is enabled. 
Nothing", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/134", "pr_id": 704484703 }, { "diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 30fd8314e..57ea686f9 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -52,6 +52,7 @@ def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n+ self.device_placement = device_placement\n \n # Handle device placement\n if device_placement:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/127", "pr_id": 693252365 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 44909ab3c..96648ca65 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -104,6 +104,8 @@ def __init__(\n self.drop_last = batch_sampler.drop_last\n \n def __len__(self):\n+ if self.split_batches:\n+ return len(self.batch_sampler)\n if len(self.batch_sampler) % self.num_processes == 0:\n return len(self.batch_sampler) // self.num_processes\n length = len(self.batch_sampler) // self.num_processes\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/121", "pr_id": 685201522 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 20274b671..281f4e9a7 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -413,8 +413,10 @@ def unscale_gradients(self, optimizer=None):\n optimizer = self._optimizers\n elif not isinstance(optimizer, (tuple, list)):\n optimizer = [optimizer]\n- for optimizer in optimizer:\n- self.scaler.unscale_(optimizer)\n+ for opt in optimizer:\n+ while isinstance(opt, AcceleratedOptimizer):\n+ opt = opt.optimizer\n+ self.scaler.unscale_(opt)\n \n def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n \"\"\"\n", "code_comments": [], "context": [ { "body": "The tests are not properly setup :see_no_evil: ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/115", "pr_id": 679070761 }, { "diff": "diff --git a/src/accelerate/notebook_launcher.py b/src/accelerate/notebook_launcher.py\nindex 4c785b12f..fef2f0c08 100644\n--- a/src/accelerate/notebook_launcher.py\n+++ b/src/accelerate/notebook_launcher.py\n@@ -16,7 +16,8 @@\n import sys\n \n import torch\n-from torch.multiprocessing import start_processes\n+\n+from packaging import version\n \n from .state import AcceleratorState\n from .utils import PrepareForLaunch\n@@ -82,6 +83,14 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n \n if num_processes > 1:\n # Multi-GPU launch\n+ if version.parse(torch.__version__) < version.parse(\"1.5.0\"):\n+ raise ImportError(\n+ \"Using `notebook_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\n+ f\"{torch.__version__}.\"\n+ )\n+\n+ from torch.multiprocessing import start_processes\n+\n if len(AcceleratorState._shared_state) > 0:\n raise ValueError(\n \"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized \"\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/108", "pr_id": 671506096 }, { "diff": "diff --git a/README.md b/README.md\nindex ffbf36bbd..78a2ceb2a 100644\n--- a/README.md\n+++ b/README.md\n@@ -44,6 +44,10 @@ 
limitations under the License.\n <p>Run your *raw* PyTorch training script on any kind of device\n </h3>\n \n+<h3 align=\"center\">\n+ <a href=\"https://hf.co/course\"><img src=\"https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/course_banner.png\"></a>\n+</h3>\n+\n ## Easy to integrate\n \n πŸ€— Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16.\ndiff --git a/docs/source/imgs/course_banner.png b/docs/source/imgs/course_banner.png\nnew file mode 100644\nindex 000000000..45773d164\nBinary files /dev/null and b/docs/source/imgs/course_banner.png differ\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/107", "pr_id": 670432129 }, { "diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 0bbba3644..30fd8314e 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -102,14 +102,15 @@ def zero_grad(self, set_to_none=None):\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n \n- def step(self):\n+ def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n- xm.optimizer_step(self.optimizer)\n+ optimizer_args = {\"closure\": closure} if closure is not None else {}\n+ xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n- self.scaler.step(self.optimizer)\n+ self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n else:\n- self.optimizer.step()\n+ self.optimizer.step(closure)\n \n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n", "code_comments": [ { "body": "```suggestion\r\n xm.optimizer_step(self.optimizer, optimizer_args={\"closure\": closure})\r\n```\r\nThe [PyTorch XLA doc](https://pytorch.org/xla/release/1.8/index.html#torch_xla.core.xla_model.optimizer_step) seems to suggest we have to use `optimizer_args` to pass this along and it's not the second argument.", "diff_hunk": "@@ -102,14 +102,14 @@ def zero_grad(self, set_to_none=None):\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n \n- def step(self):\n+ def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n- xm.optimizer_step(self.optimizer)\n+ xm.optimizer_step(self.optimizer, dict(closure=closure))", "from_author": false }, { "body": "good catch!", "diff_hunk": "@@ -102,14 +102,14 @@ def zero_grad(self, set_to_none=None):\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n \n- def step(self):\n+ def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n- xm.optimizer_step(self.optimizer)\n+ xm.optimizer_step(self.optimizer, dict(closure=closure))", "from_author": true }, { "body": "Actually thinking a little bit more about it, let's not pass anything if there is no closure:\r\n```suggestion\r\n optimizer_args = {\"closure\": closure} if closure is not None else {}\r\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\r\n```", "diff_hunk": "@@ -102,14 +102,14 @@ def zero_grad(self, set_to_none=None):\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n \n- def step(self):\n+ def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n- xm.optimizer_step(self.optimizer)\n+ xm.optimizer_step(self.optimizer, 
optimizer_args={\"closure\": closure})", "from_author": false }, { "body": "I concur, this is a little more explicit, but it shouldn't matter (famous last words...) because the base `torch.optim.Optimizer` has a `closure` argument. For the other options, we pass that along anyway. ", "diff_hunk": "@@ -102,14 +102,14 @@ def zero_grad(self, set_to_none=None):\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n \n- def step(self):\n+ def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n- xm.optimizer_step(self.optimizer)\n+ xm.optimizer_step(self.optimizer, optimizer_args={\"closure\": closure})", "from_author": true }, { "body": "If you don't mind committing the suggestion then, we're all good to merge :-)", "diff_hunk": "@@ -102,14 +102,14 @@ def zero_grad(self, set_to_none=None):\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n \n- def step(self):\n+ def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n- xm.optimizer_step(self.optimizer)\n+ xm.optimizer_step(self.optimizer, optimizer_args={\"closure\": closure})", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/105", "pr_id": 670035276 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 2bc1459fd..a700cad62 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -387,16 +387,16 @@ def prepare_data_loader(self, data_loader):\n def prepare_optimizer(self, optimizer):\n return AcceleratedOptimizer(optimizer, device_placement=self.device_placement, scaler=self.scaler)\n \n- def backward(self, loss):\n+ def backward(self, loss, **kwargs):\n \"\"\"\n Use :obj:`accelerator.backward(loss)` in lieu of :obj:`loss.backward()`.\n \"\"\"\n if self.distributed_type == DistributedType.DEEPSPEED:\n- self.deepspeed_engine.backward(loss)\n+ self.deepspeed_engine.backward(loss, **kwargs)\n elif self.scaler is not None:\n- self.scaler.scale(loss).backward()\n+ self.scaler.scale(loss).backward(**kwargs)\n else:\n- loss.backward()\n+ loss.backward(**kwargs)\n \n def unscale_gradients(self, optimizer=None):\n \"\"\"\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/104", "pr_id": 669621064 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex d119013d2..2bc1459fd 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -14,6 +14,7 @@\n \n import gc\n import os\n+from contextlib import contextmanager\n from typing import List, Optional, Union\n \n import torch\n@@ -183,6 +184,33 @@ def use_fp16(self):\n use_fp16 = self.state.use_fp16\n return use_fp16\n \n+ @contextmanager\n+ def local_main_process_first(self):\n+ \"\"\"\n+ Lets the local main process go inside a with block.\n+\n+ The other processes will enter the with block after the main process exits.\n+ \"\"\"\n+ yield from self._goes_first(self.is_local_main_process)\n+\n+ @contextmanager\n+ def main_process_first(self):\n+ \"\"\"\n+ Lets the main process go first inside a with block.\n+\n+ The other processes will enter the with block after the main process exits.\n+ \"\"\"\n+ yield from self._goes_first(self.is_main_process)\n+\n+ def _goes_first(self, is_main):\n+ if not is_main:\n+ self.wait_for_everyone()\n+\n+ yield\n+\n+ if is_main:\n+ self.wait_for_everyone()\n+\n def print(self, *args, **kwargs):\n \"\"\"\n Use in replacement 
of :obj:`print()` to only print once per server.\n", "code_comments": [ { "body": "Small nit: can we use `is_main` for consistency here (as well as at line 211 below)?", "diff_hunk": "@@ -183,6 +184,33 @@ def use_fp16(self):\n use_fp16 = self.state.use_fp16\n return use_fp16\n \n+ @contextmanager\n+ def local_main_process_first(self):\n+ \"\"\"\n+ Lets the local main process go inside a with block.\n+\n+ The other processes will enter the with block after the main process exits.\n+ \"\"\"\n+ yield from self._goes_first(self.is_local_main_process)\n+\n+ @contextmanager\n+ def main_process_first(self):\n+ \"\"\"\n+ Lets the main process go first inside a with block.\n+\n+ The other processes will enter the with block after the main process exits.\n+ \"\"\"\n+ yield from self._goes_first(self.is_main_process)\n+\n+ def _goes_first(self, is_master):\n+ if not is_master:", "from_author": false }, { "body": "Yes sure!", "diff_hunk": "@@ -183,6 +184,33 @@ def use_fp16(self):\n use_fp16 = self.state.use_fp16\n return use_fp16\n \n+ @contextmanager\n+ def local_main_process_first(self):\n+ \"\"\"\n+ Lets the local main process go inside a with block.\n+\n+ The other processes will enter the with block after the main process exits.\n+ \"\"\"\n+ yield from self._goes_first(self.is_local_main_process)\n+\n+ @contextmanager\n+ def main_process_first(self):\n+ \"\"\"\n+ Lets the main process go first inside a with block.\n+\n+ The other processes will enter the with block after the main process exits.\n+ \"\"\"\n+ yield from self._goes_first(self.is_main_process)\n+\n+ def _goes_first(self, is_master):\n+ if not is_master:", "from_author": true } ], "context": [ { "body": "Addressed the request.", "from_author": true }, { "body": "Thanks!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/98", "pr_id": 661117028 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex c7706d390..7c12b18e6 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import gc\n from typing import List, Optional, Union\n \n import torch\n@@ -338,6 +339,15 @@ def save(self, obj, f):\n \"\"\"\n save(obj, f)\n \n+ def free_memory(self):\n+ \"\"\"\n+ Will release all references to the internal objects stored and call the garbage collector. You should call this\n+ method between two trainings with different models/optimizers.\n+ \"\"\"\n+ self._optimizers = []\n+ gc.collect()\n+ torch.cuda.empty_cache()\n+\n def _get_named_parameters(self, *args):\n named_parameters = {}\n for obj in args:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/89", "pr_id": 645882788 }, { "diff": "diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex c7706d390..5a4bbe217 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -253,24 +253,36 @@ def backward(self, loss):\n else:\n loss.backward()\n \n- def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n+ def unscale_gradients(self, optimizer=None):\n \"\"\"\n- Should be used in place of :func:`torch.nn.utils.clip_grad_norm_`.\n+ Unscale the gradients in mixed precision training with AMP. 
This is a noop in all other settings.\n+\n+ Args:\n+ optimizer (:obj:`torch.optim.Optimizer` or :obj:`List[torch.optim.Optimizer]`, `optional`):\n+ The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers\n+ that were passed to :meth:`~accelerate.Accelerator.prepare`.\n \"\"\"\n- # TODO: this unscales all optimizers where we should only unscale the one where parameters are.\n if self.state.use_fp16 and self.native_amp:\n- for optimizer in self._optimizers:\n+ if optimizer is None:\n+ # TODO: this unscales all optimizers where we should only unscale the one where parameters are.\n+ optimizer = self._optimizers\n+ elif not isinstance(optimizer, (tuple, list)):\n+ optimizer = [optimizer]\n+ for optimizer in optimizer:\n self.scaler.unscale_(optimizer)\n+\n+ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n+ \"\"\"\n+ Should be used in place of :func:`torch.nn.utils.clip_grad_norm_`.\n+ \"\"\"\n+ self.unscale_gradients()\n torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\n \n def clip_grad_value_(self, parameters, clip_value):\n \"\"\"\n Should be used in place of :func:`torch.nn.utils.clip_grad_value_`.\n \"\"\"\n- # TODO: this unscales all optimizers where we should only unscale the one where parameters are.\n- if self.state.use_fp16 and self.native_amp:\n- for optimizer in self._optimizers:\n- self.scaler.unscale_(optimizer)\n+ self.unscale_gradients()\n torch.nn.utils.clip_grad_value_(parameters, clip_value)\n \n def gather(self, tensor):\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/88", "pr_id": 645879064 }, { "diff": "diff --git a/examples/README.md b/examples/README.md\nindex 4d941b5d3..19995fc0a 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -23,7 +23,7 @@ The [nlp_example.py](./nlp_example.py) script is a simple example to train a Ber\n Prior to running it you should install πŸ€— Dataset and πŸ€— Transformers:\n \n ```bash\n-pip install datasets, transformers\n+pip install datasets transformers\n ```\n \n The same script can be run in any of the following configurations:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/87", "pr_id": 645708436 }, { "diff": "diff --git a/README.md b/README.md\nindex 4f6cf5f40..ffbf36bbd 100644\n--- a/README.md\n+++ b/README.md\n@@ -160,6 +160,26 @@ Once you have MPI setup on your cluster, just run:\n mpirun -np 2 python examples/nlp_example.py\n ```\n \n+## Launching training using DeepSpeed\n+\n+πŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. to use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. 
However, if you want to tweak your DeepSpeed-related args from your Python script, we provide the `DeepSpeedPlugin`.\n+\n+```python\n+from accelerate import Accelerator, DeepSpeedPlugin\n+\n+# deepspeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it\n+# Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed\n+deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)\n+accelerator = Accelerator(fp16=True, deepspeed_plugin=deepspeed_plugin)\n+\n+# How to save your 🤗 Transformer?\n+accelerator.wait_for_everyone()\n+unwrapped_model = accelerator.unwrap_model(model)\n+unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model))\n+```\n+\n+Note: DeepSpeed support is experimental for now. If you run into a problem, please open an issue.\n+\n ## Launching your training from a notebook\n \n 🤗 Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function` then in your last cell, add:\n@@ -204,3 +224,4 @@ pip install accelerate\n - multi-GPU on several nodes (machines)\n - TPU\n - FP16 with native AMP (apex on the roadmap)\n+- DeepSpeed support (experimental)\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex f793b38c1..f6206df02 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -8,4 +8,4 @@\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs\n from .notebook_launcher import notebook_launcher\n from .state import DistributedType\n-from .utils import synchronize_rng_states\n+from .utils import DeepSpeedPlugin, synchronize_rng_states\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 7b09375df..d119013d2 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import gc\n+import os\n from typing import List, Optional, Union\n \n import torch\n@@ -22,8 +23,26 @@\n from .data_loader import prepare_data_loader\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, KwargsHandler\n from .optimizer import AcceleratedOptimizer\n-from .state import AcceleratorState, DistributedType\n-from .utils import RNGType, extract_model_from_parallel, gather, pad_across_processes, save, wait_for_everyone\n+from .state import AcceleratorState, DistributedType, is_deepspeed_available\n+from .utils import (\n+ DeepSpeedPlugin,\n+ RNGType,\n+ extract_model_from_parallel,\n+ gather,\n+ pad_across_processes,\n+ save,\n+ wait_for_everyone,\n+)\n+\n+\n+if is_deepspeed_available():\n+ import deepspeed\n+ from .deepspeed_utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper\n+\n+import logging\n+\n+\n+logger = logging.getLogger(__name__)\n \n \n class Accelerator:\n@@ -46,6 +65,9 @@ class Accelerator:\n cpu (:obj:`bool`, `optional`):\n Whether or not to force the script to execute on CPU. Will ignore GPU available if set to :obj:`True` and\n force the execution on one process only.\n+ deepspeed_plugin (:obj:`DeepSpeedPlugin`, `optional`):\n+ Tweak your DeepSpeed-related args using this argument. 
This argument is optional and can be configured\n+ directly using `accelerate config`\n rng_types (list of :obj:`str` or :class:`~accelerate.utils.RNGType`):\n The list of random number generators to synchronize at the beginning of each iteration in your prepared\n dataloaders. Should be one or several of:\n@@ -74,10 +96,18 @@ def __init__(\n split_batches: bool = False,\n fp16: bool = None,\n cpu: bool = False,\n+ deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n- self.state = AcceleratorState(fp16=fp16, cpu=cpu, _from_accelerator=True)\n+ if deepspeed_plugin is None: # init from env variables\n+ deepspeed_plugin = DeepSpeedPlugin() if os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" else None\n+ else:\n+ assert isinstance(\n+ deepspeed_plugin, DeepSpeedPlugin\n+ ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\n+\n+ self.state = AcceleratorState(fp16=fp16, cpu=cpu, deepspeed_plugin=deepspeed_plugin, _from_accelerator=True)\n \n self.device_placement = device_placement\n self.split_batches = split_batches\n@@ -109,6 +139,7 @@ def __init__(\n \n # Internal references to the training objects\n self._optimizers = []\n+ self._models = []\n \n # RNG Types\n if rng_types is None:\n@@ -146,7 +177,11 @@ def is_local_main_process(self):\n \n @property\n def use_fp16(self):\n- return self.state.use_fp16\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ use_fp16 = self.state.deepspeed_plugin.deepspeed_config[\"fp16\"][\"enabled\"]\n+ else:\n+ use_fp16 = self.state.use_fp16\n+ return use_fp16\n \n def print(self, *args, **kwargs):\n \"\"\"\n@@ -159,6 +194,7 @@ def _prepare_one(self, obj):\n if isinstance(obj, torch.utils.data.DataLoader):\n return self.prepare_data_loader(obj)\n elif isinstance(obj, torch.nn.Module):\n+ self._models.append(obj)\n return self.prepare_model(obj)\n elif isinstance(obj, torch.optim.Optimizer):\n optimizer = self.prepare_optimizer(obj)\n@@ -177,7 +213,6 @@ def prepare(self, *args):\n - :obj:`torch.utils.data.DataLoader`: PyTorch Dataloader\n - :obj:`torch.nn.Module`: PyTorch Module\n - :obj:`torch.optim.Optimizer`: PyTorch Optimizer\n-\n \"\"\"\n # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will\n # have parameters disconnected from the model (so no training :-( ).\n@@ -199,7 +234,10 @@ def prepare(self, *args):\n # 1. grabbing old model parameters\n old_named_params = self._get_named_parameters(*args)\n \n- result = tuple(self._prepare_one(obj) for obj in args)\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ result = self._prepare_deepspeed(*args)\n+ else:\n+ result = tuple(self._prepare_one(obj) for obj in args)\n \n if tpu_should_fix_optimizer:\n # 2. 
grabbing new model parameters\n@@ -231,6 +269,82 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ deepspeed_plugin = self.state.deepspeed_plugin\n+ self.deepspeed_config = deepspeed_plugin.deepspeed_config\n+\n+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ if len(batch_sizes) == 0:\n+ raise ValueError(\n+ \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ )\n+\n+ batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)\n+ if len(batch_sizes) > 1:\n+ logger.info(\n+ f\"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here \\\n+ {deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\n+ )\n+\n+ self.deepspeed_config[\"train_batch_size\"] = (\n+ batch_size_per_device * deepspeed_plugin.gradient_accumulation_steps * self.num_processes\n+ )\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj\n+\n+ if deepspeed_plugin.auto_opt_mapping:\n+ is_adam = isinstance(optimizer, torch.optim.Adam)\n+ is_adamw = isinstance(optimizer, torch.optim.AdamW)\n+ if (is_adam or is_adamw) and deepspeed_plugin.offload_optimizer_device == \"cpu\":\n+ defaults = optimizer.defaults\n+ optimizer = deepspeed.ops.adam.DeepSpeedCPUAdam(\n+ model.parameters(),\n+ lr=defaults[\"lr\"],\n+ bias_correction=True,\n+ betas=defaults[\"betas\"],\n+ eps=defaults[\"eps\"],\n+ weight_decay=defaults[\"weight_decay\"],\n+ amsgrad=defaults[\"amsgrad\"],\n+ adamw_mode=is_adamw,\n+ )\n+\n+ # useful when only eval_dataloader is given into `accelerator.prepare()`\n+ if model is not None:\n+ engine = DeepSpeedEngineWrapper(\n+ args=None,\n+ model=model,\n+ optimizer=optimizer,\n+ config_params=self.deepspeed_config,\n+ dist_init_required=False,\n+ )\n+ for i in range(len(result)):\n+ if isinstance(result[i], torch.nn.Module):\n+ result[i] = engine\n+ elif isinstance(result[i], torch.optim.Optimizer):\n+ result[i] = DeepSpeedOptimizerWrapper(engine.optimizer, engine)\n+ self.deepspeed_engine = engine # pointing for deepspeed_engine.backward()\n+ self._models.append(engine)\n+ self._optimizers.append(engine.optimizer)\n+ assert (\n+ len(self._models) == 1\n+ ), \"You can't use same `Accelerator()` instance with 2 models when using DeepSpeed\"\n+\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ assert hasattr(\n+ self, \"deepspeed_engine\"\n+ ), \"You need to pass the model along the optimizer when using Deepspeed.\"\n+\n+ return tuple(result)\n+\n def prepare_data_loader(self, data_loader):\n return prepare_data_loader(\n data_loader,\n@@ -249,7 +363,9 @@ def backward(self, loss):\n \"\"\"\n Use :obj:`accelerator.backward(loss)` in lieu of :obj:`loss.backward()`.\n \"\"\"\n- if self.scaler is not None:\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ self.deepspeed_engine.backward(loss)\n+ elif self.scaler is not None:\n self.scaler.scale(loss).backward()\n else:\n loss.backward()\n@@ -357,6 +473,8 @@ def free_memory(self):\n method between two trainings with different models/optimizers.\n \"\"\"\n self._optimizers = []\n+ self._models = []\n+ 
self.deepspeed_engine = None\n gc.collect()\n torch.cuda.empty_cache()\n \n@@ -384,3 +502,21 @@ def _get_devices(self, *args):\n optimizer_device = param_group[\"params\"][0].device\n break\n return (model_device, optimizer_device)\n+\n+ def get_state_dict(self, model):\n+ is_zero_3 = False\n+ if is_deepspeed_available():\n+ if isinstance(model, DeepSpeedEngineWrapper) and self.distributed_type == DistributedType.DEEPSPEED:\n+ is_zero_3 = self.state.deepspeed_plugin.zero_stage == 3\n+\n+ if is_zero_3:\n+ state_dict = model._zero3_consolidated_fp16_state_dict()\n+ else:\n+ model = self.unwrap_model(model)\n+ state_dict = model.state_dict()\n+\n+ for k in state_dict:\n+ if state_dict[k].dtype == torch.float16:\n+ state_dict[k] = state_dict[k].float()\n+\n+ return state_dict\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex ebc176089..eaaad20f2 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -16,6 +16,7 @@\n \n from accelerate.state import ComputeEnvironment, DistributedType\n \n+from ...utils import is_deepspeed_available\n from .config_args import ClusterConfig\n from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n \n@@ -31,7 +32,7 @@ def get_cluster_input():\n num_machines = 1\n main_process_ip = None\n main_process_port = None\n- if distributed_type == DistributedType.MULTI_GPU or distributed_type == DistributedType.MULTI_CPU:\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:\n num_machines = _ask_field(\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n lambda x: int(x),\n@@ -50,6 +51,42 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n+\n+ deepspeed_config = None\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n+ use_deepspeed = _ask_field(\n+ \"Do you want to use DeepSpeed? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_deepspeed:\n+ distributed_type = DistributedType.DEEPSPEED\n+ assert (\n+ is_deepspeed_available()\n+ ), \"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source\"\n+\n+ deepspeed_config = {}\n+ if distributed_type == DistributedType.DEEPSPEED:\n+ deepspeed_config[\"zero_stage\"] = _ask_field(\n+ \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\n+ lambda x: int(x),\n+ default=2,\n+ )\n+\n+ if deepspeed_config[\"zero_stage\"] >= 2:\n+ deepspeed_config[\"offload_optimizer_device\"] = _ask_field(\n+ \"Where to offload optimizer states? [NONE/cpu/nvme]: \",\n+ lambda x: str(x),\n+ default=\"none\",\n+ )\n+\n+ deepspeed_config[\"gradient_accumulation_steps\"] = _ask_field(\n+ \"How many gradient accumulation steps you're passing in your script? [1]: \",\n+ lambda x: int(x),\n+ default=1,\n+ )\n+\n if distributed_type == DistributedType.TPU:\n main_training_function = _ask_field(\n \"What is the name of the function in your script that should be launched in all parallel scripts? 
[main]: \",\n@@ -85,4 +122,5 @@ def get_cluster_input():\n main_process_ip=main_process_ip,\n main_process_port=main_process_port,\n main_training_function=main_training_function,\n+ deepspeed_config=deepspeed_config,\n )\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 8cc993b59..55b019fa4 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -119,6 +119,9 @@ class ClusterConfig(BaseConfig):\n main_process_port: Optional[int] = None\n main_training_function: str = \"main\"\n \n+ # args for deepspeed_plugin\n+ deepspeed_config: dict = None\n+\n \n @dataclass\n class SageMakerConfig(BaseConfig):\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex e27c6ed5c..4f392d6fa 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -44,6 +44,12 @@ def launch_command_parser(subparsers=None):\n action=\"store_true\",\n help=\"Whether or not this should launch a distributed GPU training.\",\n )\n+ parser.add_argument(\n+ \"--use_deepspeed\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"Whether to use deepspeed.\",\n+ )\n parser.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n@@ -95,6 +101,25 @@ def launch_command_parser(subparsers=None):\n \"script.\"\n ),\n )\n+ parser.add_argument(\n+ \"--zero_stage\",\n+ default=None,\n+ type=int,\n+ help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--offload_optimizer_device\",\n+ default=None,\n+ type=str,\n+ help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+ parser.add_argument(\n+ \"--gradient_accumulation_steps\",\n+ default=None,\n+ type=int,\n+ help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).\",\n+ )\n+\n # Other arguments of the training scripts\n parser.add_argument(\"training_script_args\", nargs=argparse.REMAINDER, help=\"Arguments of the training script.\")\n \n@@ -151,6 +176,43 @@ def multi_gpu_launcher(args):\n raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n \n \n+def deepspeed_launcher(args):\n+\n+ cmd = [\"deepspeed\"]\n+ if args.num_machines > 1:\n+ cmd.extend(\n+ [\n+ \"--num_gpus\",\n+ str(args.num_processes // args.num_machines),\n+ \"--num_nodes\",\n+ str(args.num_machines),\n+ \"--node_rank\",\n+ str(args.machine_rank),\n+ \"--master_addr\",\n+ args.main_process_ip,\n+ \"--master_port\",\n+ str(args.main_process_port),\n+ ]\n+ )\n+ else:\n+ cmd.extend([\"--num_gpus\", str(args.num_processes)])\n+\n+ cmd.append(args.training_script)\n+ cmd.extend(args.training_script_args)\n+\n+ current_env = os.environ.copy()\n+ current_env[\"USE_FP16\"] = str(args.fp16)\n+ current_env[\"USE_DEEPSPEED\"] = \"true\"\n+ current_env[\"DEEPSPEED_ZERO_STAGE\"] = str(args.zero_stage)\n+ current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = str(args.gradient_accumulation_steps)\n+ current_env[\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\"] = str(args.offload_optimizer_device)\n+\n+ process = subprocess.Popen(cmd, env=current_env)\n+ process.wait()\n+ if process.returncode != 0:\n+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n+\n+\n def tpu_launcher(args):\n import torch_xla.distributed.xla_multiprocessing as 
xmp\n \n@@ -276,19 +338,26 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \n def launch_command(args):\n # Sanity checks\n- if args.multi_gpu and args.tpu:\n- raise ValueError(\"You can only pick one between `--multi_gpu` and `--tpu`.\")\n+ if sum([args.multi_gpu, args.tpu, args.use_deepspeed]) > 1:\n+ raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`.\")\n \n defaults = None\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n- if not args.multi_gpu and not args.tpu:\n+ if not args.multi_gpu and not args.tpu and not args.use_deepspeed:\n+ args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n # Update args with the defaults\n for name, attr in defaults.__dict__.items():\n+ if isinstance(attr, dict):\n+ for k in defaults.deepspeed_config:\n+ if getattr(args, k) is None:\n+ setattr(args, k, defaults.deepspeed_config[k])\n+ continue\n+\n # Those args are handled separately\n if (\n name not in [\"compute_environment\", \"fp16\", \"distributed_type\"]\n@@ -303,7 +372,9 @@ def launch_command(args):\n args.num_processes = 1\n \n # Use the proper launcher\n- if args.multi_gpu and not args.cpu:\n+ if args.use_deepspeed and not args.cpu:\n+ deepspeed_launcher(args)\n+ elif args.multi_gpu and not args.cpu:\n multi_gpu_launcher(args)\n elif args.tpu and not args.cpu:\n tpu_launcher(args)\ndiff --git a/src/accelerate/deepspeed_utils.py b/src/accelerate/deepspeed_utils.py\nnew file mode 100644\nindex 000000000..11897ddc7\n--- /dev/null\n+++ b/src/accelerate/deepspeed_utils.py\n@@ -0,0 +1,96 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from .optimizer import AcceleratedOptimizer\n+from .state import is_apex_available, is_deepspeed_available\n+\n+\n+if is_deepspeed_available():\n+ from deepspeed import DeepSpeedEngine\n+\n+if is_apex_available():\n+ import amp\n+\n+\n+class DeepSpeedEngineWrapper(DeepSpeedEngine):\n+ \"\"\"\n+ Wrapper over deepspeed.DeepSpeedEngine object\n+ \"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+\n+ # overwriting micro_steps for user's gradient_accumulation\n+ self.micro_steps = -1\n+\n+ def step(self, lr_kwargs=None):\n+ \"\"\"DeepSpeedEngine.step() without `micro_steps` update & no profiling\"\"\"\n+ if self.is_gradient_accumulation_boundary(): # it shouldn't matter whether we keep this line or not\n+ if self.progressive_layer_drop:\n+ self.progressive_layer_drop.update_state(self.global_steps)\n+\n+ self._take_model_step(lr_kwargs)\n+\n+ def backward(self, loss):\n+ \"\"\"DeepSpeedEngine.backward() with with no loss scaling; no profiling but with `micro_steps` update\"\"\"\n+\n+ if self.zero_optimization():\n+ self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()\n+ self.optimizer.backward(loss)\n+ elif self.amp_enabled():\n+ # AMP requires delaying unscale when inside gradient accumulation boundaries\n+ # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n+ delay_unscale = not self.is_gradient_accumulation_boundary()\n+ with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss:\n+ scaled_loss.backward()\n+ elif self.fp16_enabled():\n+ self.optimizer.backward(loss)\n+ else:\n+ loss.backward()\n+\n+ if self.enable_backward_allreduce:\n+ self.allreduce_gradients()\n+\n+ # this will ensure deepspeed gradient_accumulation matches user's accumulation\n+ self.micro_steps += 1\n+\n+\n+class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):\n+ \"\"\"\n+ Internal wrapper around a deepspeed optimizer.\n+\n+ Args:\n+ optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n+ The optimizer to wrap.\n+ \"\"\"\n+\n+ def __init__(self, optimizer, model: DeepSpeedEngineWrapper):\n+ super().__init__(optimizer, device_placement=False, scaler=None)\n+\n+ self.model = model\n+\n+ def zero_grad(self, set_to_none=None):\n+ pass # `model.step()` is doing that automatically. 
Therefore, it's implementation is not needed\n+\n+ def step(self):\n+ \"\"\"This will handle optimizer.step() & optimizer.zero_grad() with gradient_accumulation\"\"\"\n+ self.model.step()\n+\n+ @property\n+ def is_overflow(self):\n+ \"\"\"This must be called before lr_scheduler.step() when using deepspeed with fp16\"\"\"\n+ overflow = False\n+ if hasattr(self.optimizer, \"overflow\"):\n+ overflow = self.optimizer.overflow\n+ return overflow\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 1dd9f506a..0bbba3644 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -114,3 +114,8 @@ def step(self):\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n+\n+ @property\n+ def is_overflow(self):\n+ \"\"\"This needs to be implemented at the end\"\"\"\n+ return False # TODO: implement it\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 76376038b..5b349fc38 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import importlib\n import os\n from distutils.util import strtobool\n from enum import Enum\n@@ -48,10 +49,18 @@ def is_ccl_available():\n return _ccl_available\n \n \n+def is_apex_available():\n+ return importlib.util.find_spec(\"apex\") is not None\n+\n+\n def is_tpu_available():\n return _tpu_available\n \n \n+def is_deepspeed_available():\n+ return importlib.util.find_spec(\"deepspeed\") is not None\n+\n+\n def parse_flag_from_env(key, default=False):\n value = os.environ.get(key, str(default))\n return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int...\n@@ -66,6 +75,7 @@ class DistributedType(str, Enum):\n - **NO** -- Not a distributed environment, just a single process.\n - **MULTI_CPU** -- Distributed on multiple CPU nodes.\n - **MULTI_GPU** -- Distributed on multiple GPUs.\n+ - **DEEPSPEED** -- Using DeepSpeed.\n - **TPU** -- Distributed on TPUs.\n \"\"\"\n \n@@ -73,6 +83,7 @@ class DistributedType(str, Enum):\n NO = \"NO\"\n MULTI_CPU = \"MULTI_CPU\"\n MULTI_GPU = \"MULTI_GPU\"\n+ DEEPSPEED = \"DEEPSPEED\"\n TPU = \"TPU\"\n \n \n@@ -127,10 +138,11 @@ class AcceleratorState:\n \n _shared_state = {}\n \n- def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool = False):\n+ def __init__(self, fp16: bool = None, cpu: bool = False, deepspeed_plugin=None, _from_accelerator: bool = False):\n self.__dict__ = self._shared_state\n if not getattr(self, \"initialized\", False):\n self.backend = None\n+ self.deepspeed_plugin = None\n if not _from_accelerator:\n raise ValueError(\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\n@@ -143,6 +155,23 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.local_process_index = xm.get_local_ordinal()\n self.device = xm.xla_device()\n self.use_fp16 = False\n+ elif os.environ.get(\"USE_DEEPSPEED\", \"false\") == \"true\" and not cpu:\n+ assert (\n+ is_deepspeed_available()\n+ ), \"DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source\"\n+ self.distributed_type = DistributedType.DEEPSPEED\n+ if not torch.distributed.is_initialized():\n+ torch.distributed.init_process_group(backend=\"nccl\")\n+ self.backend = \"nccl\"\n+ 
self.num_processes = torch.distributed.get_world_size()\n+ self.process_index = torch.distributed.get_rank()\n+ self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n+ self.device = torch.device(\"cuda\", self.local_process_index)\n+ torch.cuda.set_device(self.device)\n+ self.use_fp16 = False # deepspeed handles fp16 using deepspeed_config\n+ fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ deepspeed_plugin.deepspeed_config.update({\"fp16\": {\"enabled\": fp16}})\n+ self.deepspeed_plugin = deepspeed_plugin\n elif int(os.environ.get(\"LOCAL_RANK\", -1)) != -1 and not cpu:\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n@@ -200,11 +229,15 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.initialized = True\n \n def __repr__(self):\n- return (\n+ use_fp16 = self.deepspeed_plugin.fp16 if self.distributed_type == DistributedType.DEEPSPEED else self.use_fp16\n+ repr = (\n f\"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\\n\"\n f\"Num processes: {self.num_processes}\\n\"\n f\"Process index: {self.process_index}\\n\"\n f\"Local process index: {self.local_process_index}\\n\"\n f\"Device: {self.device}\\n\"\n- f\"Use FP16 precision: {self.use_fp16}\\n\"\n+ f\"Use FP16 precision: {use_fp16}\\n\"\n )\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ repr += f\"ds_config: {self.deepspeed_plugin.ds_config}\\n\"\n+ return repr\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 7302ead15..6edd1f1c0 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -15,13 +15,14 @@\n import importlib\n import os\n import random\n+from dataclasses import dataclass, field\n from enum import Enum\n from typing import List, Optional, Union\n \n import numpy as np\n import torch\n \n-from .state import AcceleratorState, DistributedType, is_tpu_available\n+from .state import AcceleratorState, DistributedType, is_deepspeed_available, is_tpu_available\n \n \n if is_tpu_available():\n@@ -36,6 +37,10 @@ def is_sagemaker_available():\n return importlib.util.find_spec(\"sagemaker\") is not None\n \n \n+if is_deepspeed_available():\n+ from deepspeed import DeepSpeedEngine\n+\n+\n class RNGType(Enum):\n TORCH = \"torch\"\n CUDA = \"cuda\"\n@@ -142,7 +147,11 @@ def extract_model_from_parallel(model):\n Returns:\n :obj:`torch.nn.Module`: The extracted model.\n \"\"\"\n- while isinstance(model, (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)):\n+ options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n+ if is_deepspeed_available():\n+ options += (DeepSpeedEngine,)\n+\n+ while isinstance(model, options):\n model = model.module\n return model\n \n@@ -251,6 +260,7 @@ def wait_for_everyone():\n if (\n AcceleratorState().distributed_type == DistributedType.MULTI_GPU\n or AcceleratorState().distributed_type == DistributedType.MULTI_CPU\n+ or AcceleratorState().distributed_type == DistributedType.DEEPSPEED\n ):\n torch.distributed.barrier()\n elif AcceleratorState().distributed_type == DistributedType.TPU:\n@@ -293,3 +303,49 @@ def __call__(self, index, *args):\n os.environ[\"RANK\"] = str(index)\n \n self.launcher(*args)\n+\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(\n+ default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n+ )\n+ zero_stage: int = field(\n+ 
default=None,\n+ metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n+ )\n+\n+ auto_opt_mapping: bool = field(\n+ default=True,\n+ metadata={\"help\": \"whether to map torch.adam to deepspeed optimizer version of adam based on config\"},\n+ )\n+\n+ offload_optimizer_device: bool = field(default=None, metadata={\"help\": \"Possible options are none|cpu|nvme\"})\n+\n+ def __post_init__(self):\n+\n+ if self.gradient_accumulation_steps is None:\n+ self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n+\n+ if self.zero_stage is None:\n+ self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+\n+ if self.offload_optimizer_device is None:\n+ self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+\n+ self.deepspeed_config = {\n+ \"train_batch_size\": None,\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"zero_optimization\": {\n+ \"stage\": self.zero_stage,\n+ \"offload_optimizer\": {\n+ \"device\": self.offload_optimizer_device,\n+ },\n+ },\n+ \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n+ }\n", "code_comments": [ { "body": "```suggestion\r\n assert isinstance(deepspeed_plugin, DeepSpeedPlugin), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"\r\n```", "diff_hunk": "@@ -86,18 +85,10 @@ def __init__(\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n+ if deepspeed_plugin is not None:\n+ assert isinstance(deepspeed_plugin, DeepSpeedPlugin), \"deepspeed_plugin must be instance of DeepSpeedPlugin\"", "from_author": false }, { "body": "```suggestion\r\n assert len(batch_size) > 0, \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\r\n```", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"", "from_author": false }, { "body": "Rename to batch_sizes (since there are several of them)?", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]", "from_author": false }, { "body": "```suggestion\r\n logger.info(\"Since you passed several dataloaders, `is_train_batch_min` will determine the `train_batch_size`\".)\r\n```\r\n\r\n+ I think we should check the list has more than one element before issuing this info!", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ 
batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")", "from_author": false }, { "body": "Nothing needs to be done for the `eval_batch_size`?", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+", "from_author": false }, { "body": "I don't understand this part, could you explain?", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ model_parameters = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj\n+\n+ # TODO: how dict based optimizer should be passed (in optimizer or ds_plugin) ??\n+ # if isinstance(optimizer, dict):\n+ # self.ds_config.update({\"optimizer\": optimizer})\n+ # optimizer = None", "from_author": false }, { "body": "The error message should be more along the lines of \"You need to pass the model along the optimizer when using Deepspeed.\"", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else 
max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ model_parameters = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj\n+\n+ # TODO: how dict based optimizer should be passed (in optimizer or ds_plugin) ??\n+ # if isinstance(optimizer, dict):\n+ # self.ds_config.update({\"optimizer\": optimizer})\n+ # optimizer = None\n+\n+ # useful when only eval_dataloader is given into `accelerator.prepare()`\n+ if model is not None:\n+ model, optimizer, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config_params=self.ds_config, dist_init_required=False, model_parameters=model_parameters)\n+ assert isinstance(model, deepspeed.DeepSpeedEngine), \"PipelineEngine not supported currently\"\n+ for i in range(len(result)):\n+ if isinstance(result[i], torch.nn.Module):\n+ result[i] = DeepSpeedEngineWrapper(model)\n+ elif isinstance(result[i], torch.optim.Optimizer):\n+ result[i] = DeepSpeedOptimizerWrapper(optimizer, model)\n+ self.model = model # pointing for model.backward()\n+ \n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ assert hasattr(self, \"model\"), \"Accelerator instance must have model as its attribute\"", "from_author": false }, { "body": "I'm not sure I understand this error message.", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ model_parameters = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj\n+\n+ # TODO: how dict based optimizer should be passed (in optimizer or ds_plugin) ??\n+ # if isinstance(optimizer, dict):\n+ # self.ds_config.update({\"optimizer\": optimizer})\n+ # optimizer = None\n+\n+ # useful when only eval_dataloader is given into `accelerator.prepare()`\n+ if model is not None:\n+ model, optimizer, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config_params=self.ds_config, dist_init_required=False, model_parameters=model_parameters)\n+ assert isinstance(model, deepspeed.DeepSpeedEngine), \"PipelineEngine not supported currently\"", "from_author": false }, { "body": "Great to have this handled here :-) ", "diff_hunk": "@@ -248,7 +308,9 @@ def 
backward(self, loss):\n \"\"\"\n Use :obj:`accelerator.backward(loss)` in lieu of :obj:`loss.backward()`.\n \"\"\"\n- if self.scaler is not None:\n+ if self.distributed_type == DistributedType.DEEPSPEED:\n+ self.model.backward(loss)", "from_author": false }, { "body": "I think deepspeed is a subcase of multi-CPU/multi-GPU no? I would rather ask the question after.", "diff_hunk": "@@ -16,22 +16,22 @@\n \n from accelerate.state import ComputeEnvironment, DistributedType\n \n-from .config_args import ClusterConfig\n+from .config_args import ClusterConfig, DeepSpeedConfig\n from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n \n \n def get_cluster_input():\n distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): \",\n+ \"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] DeepSpeed [4] TPU): \",\n _convert_distributed_mode,\n- error_message=\"Please enter 0, 1, 2 or 3.\",\n+ error_message=\"Please enter 0, 1, 2, 3 or 4.\",", "from_author": false }, { "body": "I think this one should just be left at True: there are going to be tons of parameters for deepspeed so let's keep the essential ones in the question (and use the default for the others). Users can edit the config after!", "diff_hunk": "@@ -50,6 +50,25 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n+\n+ # TODO: add other deepspeed args\n+ if distributed_type == DistributedType.DEEPSPEED:\n+ zero_stage = _ask_field(\n+ \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2)? [2]: \",\n+ lambda x: int(x),\n+ default=2,\n+ )\n+ gradient_accumulation_steps = _ask_field(\n+ \"What should be your number of gradient accumulation steps? [1]: \",\n+ lambda x: int(x),\n+ default=1,\n+ )\n+ is_train_batch_min = _ask_field(\n+ \"Should accelerate choose minimum batch size incase both train & eval dataloaders are passed? 
[True]: \",", "from_author": false }, { "body": "Why add this default?", "diff_hunk": "@@ -111,13 +111,25 @@ def __post_init__(self):\n \n @dataclass\n class ClusterConfig(BaseConfig):\n- num_processes: int\n+ num_processes: int = None\n machine_rank: int = 0\n num_machines: int = 1\n main_process_ip: Optional[str] = None\n main_process_port: Optional[int] = None\n main_training_function: str = \"main\"\n \n+ def __post_init__(self):\n+ super().__post_init__()\n+ assert self.num_processes is not None, \"num_processes can't be None\"", "from_author": false }, { "body": "Are those all expected by deepspeed?", "diff_hunk": "@@ -151,6 +151,44 @@ def multi_gpu_launcher(args):\n raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n \n \n+def deepspeed_launcher(args):\n+ # TODO: need to complete this\n+\n+ cmd = [\"deepspeed\"]\n+ if args.num_machines > 1:\n+ cmd.extend(\n+ [\n+ \"--num_gpus\",\n+ str(args.num_processes // args.num_machines),\n+ \"--num_nodes\",\n+ str(args.num_machines),\n+ \"--node_rank\",\n+ str(args.machine_rank),\n+ \"--master_addr\",\n+ args.main_process_ip,\n+ \"--master_port\",\n+ str(args.main_process_port),\n+ ]\n+ )\n+ else:\n+ cmd.extend([\"--num_gpus\", str(args.num_processes)])\n+\n+ cmd.append(args.training_script)\n+ cmd.extend(args.training_script_args)\n+\n+ current_env = os.environ.copy()\n+ current_env[\"USE_FP16\"] = str(args.fp16)\n+ current_env[\"ZERO_STAGE\"] = int(args.zero_stage)\n+ current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = int(args.gradient_accumulation_steps)\n+ current_env[\"IS_TRAIN_BATCH_MIN\"] = bool(args.is_train_batch_min)", "from_author": false }, { "body": "There is something done in Transformers for that, if you need help.", "diff_hunk": "@@ -276,14 +314,15 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \n def launch_command(args):\n # Sanity checks\n- if args.multi_gpu and args.tpu:\n+ if args.multi_gpu and args.tpu: # TODO: fix this for deepspeed", "from_author": false }, { "body": "Make sure to use the `make style` command, this is not super readable here :-)", "diff_hunk": "@@ -0,0 +1,177 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import torch\n+import torch.nn as nn\n+from dataclasses import dataclass, field\n+\n+from .state import is_apex_available\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(default=1, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"})\n+ zero_stage: int = field(default=0, metadata={\"help\": \"Possible options are 0,1,2\"})\n+ is_train_batch_min: str = field(default=True, metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"})", "from_author": false }, { "body": "This is copied from `deepspeed`? 
Could we subclass their class then and only change the method that requires it?", "diff_hunk": "@@ -0,0 +1,177 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import torch\n+import torch.nn as nn\n+from dataclasses import dataclass, field\n+\n+from .state import is_apex_available\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(default=1, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"})\n+ zero_stage: int = field(default=0, metadata={\"help\": \"Possible options are 0,1,2\"})\n+ is_train_batch_min: str = field(default=True, metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"})\n+\n+ fp16: bool = field(repr=False, default=False, metadata={\"help\": \"You need not define this here\"})\n+\n+ def __post_init__(self):\n+ self.ds_config = {\n+ \"train_batch_size\": None,\n+ \"fp16\": {\"enabled\": False if self.fp16 is None else self.fp16},\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"zero_optimization\": {\"stage\": self.zero_stage},\n+ \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n+ }\n+\n+\n+class DeepSpeedEngineWrapper(nn.Module):", "from_author": false }, { "body": "Should this subclass the `AcceleratedOptimizer` for most properties and only overrides the necessary ones?", "diff_hunk": "@@ -0,0 +1,177 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import torch\n+import torch.nn as nn\n+from dataclasses import dataclass, field\n+\n+from .state import is_apex_available\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(default=1, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"})\n+ zero_stage: int = field(default=0, metadata={\"help\": \"Possible options are 0,1,2\"})\n+ is_train_batch_min: str = field(default=True, metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"})\n+\n+ fp16: bool = field(repr=False, default=False, metadata={\"help\": \"You need not define this here\"})\n+\n+ def __post_init__(self):\n+ self.ds_config = {\n+ \"train_batch_size\": None,\n+ \"fp16\": {\"enabled\": False if self.fp16 is None else self.fp16},\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"zero_optimization\": {\"stage\": self.zero_stage},\n+ \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n+ }\n+\n+\n+class DeepSpeedEngineWrapper(nn.Module):\n+ \"\"\"\n+ Internal wrapper around DeepSpeedEngine instance\n+\n+ Args:\n+ model (:obj: `DeepSpeedEngine`):\n+ DeepSpeedEngine instance created using deepspeed.initalize()\n+ \"\"\"\n+\n+ def __init__(self, model):\n+ super().__init__()\n+ self.model = model\n+ self.optimizer = model.optimizer\n+\n+ self.progressive_layer_drop = model.progressive_layer_drop\n+ self.global_steps = model.global_steps\n+\n+ # overwriting micro_steps for user's gradient_accumulation\n+ self.model.micro_steps = -1\n+\n+ def forward(self, *args, **kwargs):\n+ return self.model(*args, **kwargs)\n+\n+ def train(self, *args, **kwargs):\n+ return self.model.train(*args, **kwargs)\n+\n+ def eval(self, *args, **kwargs):\n+ return self.model.eval(*args, **kwargs)\n+\n+ def is_gradient_accumulation_boundary(self):\n+ return self.model.is_gradient_accumulation_boundary()\n+\n+ def zero_optimization(self):\n+ return self.model.zero_optimization()\n+\n+ def amp_enabled(self):\n+ return self.model.amp_enabled()\n+\n+ def fp16_enabled(self):\n+ return self.model.fp16_enabled()\n+\n+ def _take_model_step(self, *args, **kwargs):\n+ return self.model._take_model_step(*args, **kwargs)\n+\n+ def step(self, lr_kwargs=None):\n+ \"\"\" DeepSpeedEngine.step() without `micro_steps` update & no profiling \"\"\"\n+ if self.is_gradient_accumulation_boundary(): # it shouldn't matter whether we keep this line or not\n+ if self.progressive_layer_drop:\n+ self.progressive_layer_drop.update_state(self.global_steps)\n+\n+ self._take_model_step(lr_kwargs)\n+\n+ def backward(self, loss):\n+ \"\"\" DeepSpeedEngine.backward() with no loss scaling & no profiling \"\"\"\n+ if self.zero_optimization():\n+ self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary(\n+ )\n+ self.optimizer.backward(loss)\n+ elif self.amp_enabled():\n+ assert 
is_apex_available, \"You have enabled apex in deepspeed_plugin, but apex is unavailable in your machine\"\n+ # AMP requires delaying unscale when inside gradient accumulation boundaries\n+ # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n+ delay_unscale = not self.is_gradient_accumulation_boundary()\n+ with amp.scale_loss(loss,\n+ self.optimizer,\n+ delay_unscale=delay_unscale) as scaled_loss:\n+ scaled_loss.backward()\n+ elif self.fp16_enabled():\n+ self.optimizer.backward(loss)\n+ else:\n+ loss.backward()\n+\n+ # this will ensure deepspeed gradient_accumulation matches user's accumulation\n+ self.model.micro_steps += 1\n+\n+ def save_checkpoint(self, *args, **kwargs):\n+ return self.model.save_checkpoint(*args, **kwargs)\n+\n+ def load_checkpoint(self, *args, **kwargs):\n+ return self.model.load_checkpoint(*args, **kwargs)\n+\n+\n+class DeepSpeedOptimizerWrapper(torch.optim.Optimizer):", "from_author": false }, { "body": "I can do this next week!", "diff_hunk": "@@ -113,3 +113,8 @@ def step(self):\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n+\n+ @property\n+ def is_overflow(self):\n+ \"\"\" This needs to be implemented at the end \"\"\"\n+ return False # TODO: implement it", "from_author": false }, { "body": "Apex is not supported yet. Where do you need it?", "diff_hunk": "@@ -34,6 +35,20 @@\n except ImportError:\n _tpu_available = False\n \n+try:\n+ import deepspeed\n+\n+ _deepspeed_available = True\n+except:\n+ _deepspeed_available = False\n+\n+try:\n+ from apex import amp\n+\n+ _apex_available = True\n+except ImportError:\n+ _apex_available = False\n+", "from_author": false }, { "body": "I would prefer not importing directly (see [here](https://github.com/huggingface/accelerate/blob/a96fbaaf19e650da101bfadb886ecc1d1b1cb205/src/accelerate/utils.py#L31) for an example on how to this).", "diff_hunk": "@@ -34,6 +35,20 @@\n except ImportError:\n _tpu_available = False\n \n+try:\n+ import deepspeed\n+\n+ _deepspeed_available = True\n+except:\n+ _deepspeed_available = False", "from_author": false }, { "body": "Yes, we can do that I think. Then we won't need `deepspeed.initialize()` since deepspeed.initialize() is doing just sending everything to `DeepSpeedEngine` (see [this](https://github.com/microsoft/DeepSpeed/blob/46f4573b1a8a9cda2b45f4de4e90b631cee3f80b/deepspeed/__init__.py#L119)).", "diff_hunk": "@@ -0,0 +1,177 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import torch\n+import torch.nn as nn\n+from dataclasses import dataclass, field\n+\n+from .state import is_apex_available\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(default=1, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"})\n+ zero_stage: int = field(default=0, metadata={\"help\": \"Possible options are 0,1,2\"})\n+ is_train_batch_min: str = field(default=True, metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"})\n+\n+ fp16: bool = field(repr=False, default=False, metadata={\"help\": \"You need not define this here\"})\n+\n+ def __post_init__(self):\n+ self.ds_config = {\n+ \"train_batch_size\": None,\n+ \"fp16\": {\"enabled\": False if self.fp16 is None else self.fp16},\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"zero_optimization\": {\"stage\": self.zero_stage},\n+ \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n+ }\n+\n+\n+class DeepSpeedEngineWrapper(nn.Module):", "from_author": true }, { "body": "yes in `deepspeed.initialize()` as `config_params`", "diff_hunk": "@@ -151,6 +151,44 @@ def multi_gpu_launcher(args):\n raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n \n \n+def deepspeed_launcher(args):\n+ # TODO: need to complete this\n+\n+ cmd = [\"deepspeed\"]\n+ if args.num_machines > 1:\n+ cmd.extend(\n+ [\n+ \"--num_gpus\",\n+ str(args.num_processes // args.num_machines),\n+ \"--num_nodes\",\n+ str(args.num_machines),\n+ \"--node_rank\",\n+ str(args.machine_rank),\n+ \"--master_addr\",\n+ args.main_process_ip,\n+ \"--master_port\",\n+ str(args.main_process_port),\n+ ]\n+ )\n+ else:\n+ cmd.extend([\"--num_gpus\", str(args.num_processes)])\n+\n+ cmd.append(args.training_script)\n+ cmd.extend(args.training_script_args)\n+\n+ current_env = os.environ.copy()\n+ current_env[\"USE_FP16\"] = str(args.fp16)\n+ current_env[\"ZERO_STAGE\"] = int(args.zero_stage)\n+ current_env[\"GRADIENT_ACCUMULATION_STEPS\"] = int(args.gradient_accumulation_steps)\n+ current_env[\"IS_TRAIN_BATCH_MIN\"] = bool(args.is_train_batch_min)", "from_author": true }, { "body": "`deepspeed.initialize()` passes arguments to either of 2 class - `DeepSpeedEngine` or `PipelineEngine`. & I had to make changes to DeepSpeedEngine for preventing automatic gradient_accumulation inside deepspeed (in `DeepSpeedEngineWrapper`) & hence only `DeepSpeedEngine` is supported right now. 
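(For readers following this thread: the direct-subclassing route being discussed looks roughly like the sketch below, which matches the shape the wrapper takes later in this PR; the bodies of `step()` and `backward()` are trimmed here.)

```python
from deepspeed import DeepSpeedEngine


class DeepSpeedEngineWrapper(DeepSpeedEngine):
    """Subclass DeepSpeedEngine directly and override only the methods that must change."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # overwrite micro_steps so gradient accumulation is driven by the user, not DeepSpeed
        self.micro_steps = -1

    def step(self, lr_kwargs=None):
        # same as DeepSpeedEngine.step(), minus the micro_steps update and profiling
        ...

    def backward(self, loss):
        # same as DeepSpeedEngine.backward(), minus automatic loss scaling and profiling
        ...
```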
But this line can be removed if we directly inherit `DeepSpeedEngineWrapper` from `DeepSpeedEngine` directly (as pointed out by you in a later comment).", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ model_parameters = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj\n+\n+ # TODO: how dict based optimizer should be passed (in optimizer or ds_plugin) ??\n+ # if isinstance(optimizer, dict):\n+ # self.ds_config.update({\"optimizer\": optimizer})\n+ # optimizer = None\n+\n+ # useful when only eval_dataloader is given into `accelerator.prepare()`\n+ if model is not None:\n+ model, optimizer, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config_params=self.ds_config, dist_init_required=False, model_parameters=model_parameters)\n+ assert isinstance(model, deepspeed.DeepSpeedEngine), \"PipelineEngine not supported currently\"", "from_author": true }, { "body": "DeepSpeed can build optimizer automatically for us if optimizer key is included in `ds_config`, instead of giving `torch.optim.Optimizer` in `deepspeed.initialize()`. Passing optimizer dict is the recommended way by deepspeed since they have some fused implementation & it's more easy to switch their optimizers. So we need to include that somehow.\r\n\r\nSo one way is to let user pass optimizer as dict in accelerate.prepare() itself & later we can handle that; while other can be to include it in deepspeed_plugin. 
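(For readers: the "optimizer as a dict in the DeepSpeed config" option mentioned above looks roughly like the sketch below; the values are placeholders and are not part of this PR.)

```python
# Placeholder values only. DeepSpeed can build (and fuse) the optimizer itself when an
# "optimizer" entry is present in the config handed to the engine, instead of receiving
# a torch.optim.Optimizer instance.
ds_config = {
    "train_batch_size": 32,                  # accelerate would derive this from the dataloader
    "gradient_accumulation_steps": 1,
    "zero_optimization": {"stage": 2},
    "optimizer": {
        "type": "AdamW",
        "params": {"lr": 3e-4, "weight_decay": 0.01},
    },
}
```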
What way you would suggest??", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ model_parameters = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj\n+\n+ # TODO: how dict based optimizer should be passed (in optimizer or ds_plugin) ??\n+ # if isinstance(optimizer, dict):\n+ # self.ds_config.update({\"optimizer\": optimizer})\n+ # optimizer = None", "from_author": true }, { "body": "its needed in DeepSpeedEngineWrapper.", "diff_hunk": "@@ -34,6 +35,20 @@\n except ImportError:\n _tpu_available = False\n \n+try:\n+ import deepspeed\n+\n+ _deepspeed_available = True\n+except:\n+ _deepspeed_available = False\n+\n+try:\n+ from apex import amp\n+\n+ _apex_available = True\n+except ImportError:\n+ _apex_available = False\n+", "from_author": true }, { "body": "I need to fix it a bit. Will work on it.", "diff_hunk": "@@ -111,13 +111,25 @@ def __post_init__(self):\n \n @dataclass\n class ClusterConfig(BaseConfig):\n- num_processes: int\n+ num_processes: int = None\n machine_rank: int = 0\n num_machines: int = 1\n main_process_ip: Optional[str] = None\n main_process_port: Optional[int] = None\n main_training_function: str = \"main\"\n \n+ def __post_init__(self):\n+ super().__post_init__()\n+ assert self.num_processes is not None, \"num_processes can't be None\"", "from_author": true }, { "body": "no", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+", "from_author": true }, { "body": "Let's focus on this on another PR. 
We could have accelerate support this in general (since the goal is to have the same API for everything) but it would require more work.", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ model_parameters = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj\n+\n+ # TODO: how dict based optimizer should be passed (in optimizer or ds_plugin) ??\n+ # if isinstance(optimizer, dict):\n+ # self.ds_config.update({\"optimizer\": optimizer})\n+ # optimizer = None", "from_author": false }, { "body": "Ok, then please use the same as below and not a direct import.", "diff_hunk": "@@ -34,6 +35,20 @@\n except ImportError:\n _tpu_available = False\n \n+try:\n+ import deepspeed\n+\n+ _deepspeed_available = True\n+except:\n+ _deepspeed_available = False\n+\n+try:\n+ from apex import amp\n+\n+ _apex_available = True\n+except ImportError:\n+ _apex_available = False\n+", "from_author": false }, { "body": "okay. 
another PR after this then :)", "diff_hunk": "@@ -230,6 +245,51 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ batch_size = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert len(batch_size) > 0, \"You must specify training_dataloader in `accelerate.prepare()` when using DeepSpeed\"\n+ logger.info(\"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\")\n+ batch_size = min(batch_size) if ds_plugin.is_train_batch_min else max(batch_size)\n+\n+ self.ds_config[\"train_batch_size\"] = batch_size * ds_plugin.gradient_accumulation_steps * self.num_processes\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ model_parameters = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj\n+\n+ # TODO: how dict based optimizer should be passed (in optimizer or ds_plugin) ??\n+ # if isinstance(optimizer, dict):\n+ # self.ds_config.update({\"optimizer\": optimizer})\n+ # optimizer = None", "from_author": true }, { "body": "Can we use `batch_sizes` here?", "diff_hunk": "@@ -231,6 +255,70 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ bz = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]", "from_author": false }, { "body": "It think this one would read better as (including the suggested rename form before):\r\n```suggestion\r\n if len(batch_sizes) == 0:\r\n raise ValueError(\"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\")\r\n```", "diff_hunk": "@@ -231,6 +255,70 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ bz = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert (\n+ len(bz) > 0\n+ ), \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"", "from_author": false }, { "body": "Since we are computing it just after, can we log it? 
the log should be something like:\r\n```\r\nf\"Since you passed both train and evaluation dataloader, `is_train_batch_min` (here {ds_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device}).\"\r\n```\r\nThis might need to be split in two lines as well to respect the 119 char limit.", "diff_hunk": "@@ -231,6 +255,70 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ bz = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert (\n+ len(bz) > 0\n+ ), \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ if len(bz) > 1:\n+ logger.info(\n+ \"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\"\n+ )\n+ batch_size_per_device = min(bz) if ds_plugin.is_train_batch_min else max(bz)", "from_author": false }, { "body": "There can be several models or several optimizers passed, so we should make a list of models and optimizers for that case here? If deepspeed only supports one, we should raise an error here.", "diff_hunk": "@@ -231,6 +255,70 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config\n+\n+ bz = [obj.batch_size for obj in args if hasattr(obj, \"batch_size\")]\n+ assert (\n+ len(bz) > 0\n+ ), \"You must specify a training or evaluation dataloader in `accelerate.prepare()` when using DeepSpeed.\"\n+ if len(bz) > 1:\n+ logger.info(\n+ \"Since you passed both train & eval dataloader, `is_train_batch_min` will decide the `train_batch_size`\"\n+ )\n+ batch_size_per_device = min(bz) if ds_plugin.is_train_batch_min else max(bz)\n+\n+ self.ds_config[\"train_batch_size\"] = (\n+ batch_size_per_device * ds_plugin.gradient_accumulation_steps * self.num_processes\n+ )\n+\n+ result = [self._prepare_one(obj) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]\n+\n+ model = None\n+ optimizer = None\n+ for obj in result:\n+ if isinstance(obj, torch.nn.Module):\n+ model = obj\n+ elif isinstance(obj, (torch.optim.Optimizer, dict)):\n+ optimizer = obj", "from_author": false }, { "body": "Is this the only reference we create? It seems so but I want to be certain.", "diff_hunk": "@@ -357,6 +447,7 @@ def free_memory(self):\n method between two trainings with different models/optimizers.\n \"\"\"\n self._optimizers = []\n+ self.deepspeed_engine = None", "from_author": false }, { "body": "```suggestion\r\n \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2)? [0]: \",\r\n```\r\nShould include 3?", "diff_hunk": "@@ -50,6 +50,39 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n+\n+ ds_config = None\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n+ use_deepspeed = _ask_field(\n+ \"Do you want to use DeepSpeed (yes/no)? 
[NO]\",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_deepspeed:\n+ distributed_type = DistributedType.DEEPSPEED\n+\n+ ds_config = {}\n+ if distributed_type == DistributedType.DEEPSPEED:\n+ ds_config[\"zero_stage\"] = _ask_field(\n+ \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2)? [0]: \",", "from_author": false }, { "body": "Should this be a more general check of whether `attr` is a dict?", "diff_hunk": "@@ -276,19 +338,26 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \n def launch_command(args):\n # Sanity checks\n- if args.multi_gpu and args.tpu:\n- raise ValueError(\"You can only pick one between `--multi_gpu` and `--tpu`.\")\n+ if sum([args.multi_gpu, args.tpu, args.use_deepspeed]) > 1:\n+ raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`.\")\n \n defaults = None\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n- if not args.multi_gpu and not args.tpu:\n+ if not args.multi_gpu and not args.tpu and not args.use_deepspeed:\n+ args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n # Update args with the defaults\n for name, attr in defaults.__dict__.items():\n+ if name == \"ds_config\":", "from_author": false }, { "body": "If I am faster than you and implement it for more general usecases, please rebase. If I'm not I will do this after merging :-)", "diff_hunk": "@@ -114,3 +114,8 @@ def step(self):\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n+\n+ @property\n+ def is_overflow(self):\n+ \"\"\"This needs to be implemented at the end\"\"\"\n+ return False # TODO: implement it", "from_author": false }, { "body": "This file should be removed afterward :-) ", "diff_hunk": "@@ -0,0 +1,461 @@\n+# coding=utf-8", "from_author": false }, { "body": "Can we say `self.deepspeed_config` to go with `self.state.deepspeed_plugin`?\r\n\r\nAlso would love if this function was using `deepspeed_plugin` instead of `ds_pluging`.", "diff_hunk": "@@ -231,6 +255,70 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config", "from_author": false }, { "body": "value of `ds_config` will always be a dict since I am defining it to be `dict` when taking deepspeed arguments from `accelerate config`.", "diff_hunk": "@@ -276,19 +338,26 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \n def launch_command(args):\n # Sanity checks\n- if args.multi_gpu and args.tpu:\n- raise ValueError(\"You can only pick one between `--multi_gpu` and `--tpu`.\")\n+ if sum([args.multi_gpu, args.tpu, args.use_deepspeed]) > 1:\n+ raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`.\")\n \n defaults = None\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = 
load_config_from_file(args.config_file)\n- if not args.multi_gpu and not args.tpu:\n+ if not args.multi_gpu and not args.tpu and not args.use_deepspeed:\n+ args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n # Update args with the defaults\n for name, attr in defaults.__dict__.items():\n+ if name == \"ds_config\":", "from_author": true }, { "body": "will remove it in the very end (before merge)", "diff_hunk": "@@ -0,0 +1,461 @@\n+# coding=utf-8", "from_author": true }, { "body": "I didn't understand the question. Could you please clarify??\r\n\r\nI will rename `ds_plugin` to `deepspeed_plugin`.", "diff_hunk": "@@ -231,6 +255,70 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config", "from_author": true }, { "body": "I would prefer we use `deepspeed_plugin` and `deepspeed_config` instead of `ds_plugin` and `ds_config`", "diff_hunk": "@@ -231,6 +255,70 @@ def prepare_model(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\n \n+ def _prepare_deepspeed(self, *args):\n+\n+ ds_plugin = self.state.deepspeed_plugin\n+ self.ds_config = ds_plugin.ds_config", "from_author": false }, { "body": "Yes, that's why I'm suggesting we check `if isinstance(attr, dict)` instead of the name.", "diff_hunk": "@@ -276,19 +338,26 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n \n def launch_command(args):\n # Sanity checks\n- if args.multi_gpu and args.tpu:\n- raise ValueError(\"You can only pick one between `--multi_gpu` and `--tpu`.\")\n+ if sum([args.multi_gpu, args.tpu, args.use_deepspeed]) > 1:\n+ raise ValueError(\"You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`.\")\n \n defaults = None\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n- if not args.multi_gpu and not args.tpu:\n+ if not args.multi_gpu and not args.tpu and not args.use_deepspeed:\n+ args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n # Update args with the defaults\n for name, attr in defaults.__dict__.items():\n+ if name == \"ds_config\":", "from_author": false }, { "body": "yes", "diff_hunk": "@@ -357,6 +447,7 @@ def free_memory(self):\n method between two trainings with different models/optimizers.\n \"\"\"\n self._optimizers = []\n+ self.deepspeed_engine = None", "from_author": true }, { "body": "This sentence is a bit ambiguous: do I have to change something in my code? I'd either say \"You don't need to change anything. If you desire to tweak your deepspeed related args, we provide ....\"", "diff_hunk": "@@ -160,6 +160,26 @@ Once you have MPI setup on your cluster, just run:\n mpirun -np 2 python examples/nlp_example.py\n ```\n \n+## Launching training using DeepSpeed\n+\n+πŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. 
For using deepspeed, you don't really need to change anything in your code once you integrate accelerate in your code. You can set everything using just `accelerate config`.", "from_author": false }, { "body": "```suggestion\r\n1. DeepSpeed support is experimental for now. In case you get into some issue, please raise an issue.\r\n```\r\nUsers should always feel free to open issues :)", "diff_hunk": "@@ -160,6 +160,26 @@ Once you have MPI setup on your cluster, just run:\n mpirun -np 2 python examples/nlp_example.py\n ```\n \n+## Launching training using DeepSpeed\n+\n+πŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. For using deepspeed, you don't really need to change anything in your code once you integrate accelerate in your code. You can set everything using just `accelerate config`.\n+\n+In case you would like to change your deepspeed related args from python script, we provide you the `DeepSpeedPlugin`.\n+\n+```python\n+from accelerator import Accelerator, DeepSpeedPlugin\n+\n+# deepspeed needs to know your gradient accumulation steps before hand, so don't forget to pass it\n+# Remember you need to do gradient accumulation by yourself, just like you would have done without deepspeed\n+deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)\n+accelerator = Accelerator(fp16=True, deepspeed_plugin=deepspeed_plugin)\n+```\n+\n+Note:\n+\n+1. DeepSpeed support is experimental for now. In case you get into some issue, feel free to raise an issue.", "from_author": false }, { "body": "The file `deepspeed_utils` has a hard requirement on `deepspeed` being available", "diff_hunk": "@@ -20,12 +20,22 @@\n from packaging import version\n \n from .data_loader import prepare_data_loader\n+from .deepspeed_utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedPlugin", "from_author": false }, { "body": "This should be conditionally imported if objects are to be imported from this file in others where deepspeed isn't necessarily installed", "diff_hunk": "@@ -0,0 +1,150 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from dataclasses import dataclass, field\n+\n+from deepspeed import DeepSpeedEngine", "from_author": false }, { "body": "I don't understand this argument", "diff_hunk": "@@ -0,0 +1,150 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from dataclasses import dataclass, field\n+\n+from deepspeed import DeepSpeedEngine\n+\n+from .optimizer import AcceleratedOptimizer\n+from .state import is_apex_available\n+\n+\n+if is_apex_available():\n+ import amp\n+\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(\n+ default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n+ )\n+ zero_stage: int = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n+ )", "from_author": false }, { "body": "Is it necessary to have this here if it shouldn't be used?", "diff_hunk": "@@ -0,0 +1,150 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from dataclasses import dataclass, field\n+\n+from deepspeed import DeepSpeedEngine\n+\n+from .optimizer import AcceleratedOptimizer\n+from .state import is_apex_available\n+\n+\n+if is_apex_available():\n+ import amp\n+\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(\n+ default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n+ )\n+ zero_stage: int = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n+ )\n+\n+ auto_opt_mapping: bool = field(\n+ default=True,\n+ metadata={\"help\": \"whether to map torch.adam to deepspeed optimizer version of adam based on config\"},\n+ )\n+\n+ offload_optimizer_device: bool = field(default=None, metadata={\"help\": \"Possible options are none|cpu|nvme\"})\n+\n+ fp16: bool = field(repr=False, default=None, metadata={\"help\": \"You need not define this here\"})", "from_author": false }, { "body": "This would place the comment as a docstring, but is that really the goal? Shouldn't it rather be:\r\n```suggestion\r\n pass # `model.step()` is doing that automatically. 
Therefore, it's implementation is not needed\r\n```", "diff_hunk": "@@ -0,0 +1,150 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from dataclasses import dataclass, field\n+\n+from deepspeed import DeepSpeedEngine\n+\n+from .optimizer import AcceleratedOptimizer\n+from .state import is_apex_available\n+\n+\n+if is_apex_available():\n+ import amp\n+\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(\n+ default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n+ )\n+ zero_stage: int = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n+ )\n+\n+ auto_opt_mapping: bool = field(\n+ default=True,\n+ metadata={\"help\": \"whether to map torch.adam to deepspeed optimizer version of adam based on config\"},\n+ )\n+\n+ offload_optimizer_device: bool = field(default=None, metadata={\"help\": \"Possible options are none|cpu|nvme\"})\n+\n+ fp16: bool = field(repr=False, default=None, metadata={\"help\": \"You need not define this here\"})\n+\n+ def __post_init__(self):\n+\n+ if self.gradient_accumulation_steps is None:\n+ self.gradient_accumulation_steps = int(os.environ.get(\"GRADIENT_ACCUMULATION_STEPS\", 1))\n+\n+ if self.zero_stage is None:\n+ self.zero_stage = int(os.environ.get(\"DEEPSPEED_ZERO_STAGE\", 2))\n+\n+ if self.fp16 is None:\n+ self.fp16 = bool(os.environ.get(\"USE_FP16\", \"False\"))\n+\n+ if self.offload_optimizer_device is None:\n+ self.offload_optimizer_device = os.environ.get(\"DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE\", \"none\")\n+\n+ self.deepspeed_config = {\n+ \"train_batch_size\": None,\n+ \"fp16\": {\"enabled\": self.fp16},\n+ \"gradient_accumulation_steps\": self.gradient_accumulation_steps,\n+ \"zero_optimization\": {\n+ \"stage\": self.zero_stage,\n+ \"offload_optimizer\": {\n+ \"device\": self.offload_optimizer_device,\n+ },\n+ },\n+ \"steps_per_print\": float(\"inf\"), # this will stop deepspeed from logging @ stdout\n+ }\n+\n+\n+class DeepSpeedEngineWrapper(DeepSpeedEngine):\n+ \"\"\"\n+ Wrapper over deepspeed.DeepSpeedEngine object\n+ \"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+\n+ # overwriting micro_steps for user's gradient_accumulation\n+ self.micro_steps = -1\n+\n+ def step(self, lr_kwargs=None):\n+ \"\"\"DeepSpeedEngine.step() without `micro_steps` update & no profiling\"\"\"\n+ if self.is_gradient_accumulation_boundary(): # it shouldn't matter whether we keep this line or not\n+ if self.progressive_layer_drop:\n+ self.progressive_layer_drop.update_state(self.global_steps)\n+\n+ self._take_model_step(lr_kwargs)\n+\n+ def backward(self, loss):\n+ \"\"\"DeepSpeedEngine.backward() with with no 
loss scaling; no profiling but with `micro_steps` update\"\"\"\n+\n+ if self.zero_optimization():\n+ self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()\n+ self.optimizer.backward(loss)\n+ elif self.amp_enabled():\n+ # AMP requires delaying unscale when inside gradient accumulation boundaries\n+ # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n+ delay_unscale = not self.is_gradient_accumulation_boundary()\n+ with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss:\n+ scaled_loss.backward()\n+ elif self.fp16_enabled():\n+ self.optimizer.backward(loss)\n+ else:\n+ loss.backward()\n+\n+ if self.enable_backward_allreduce:\n+ self.allreduce_gradients()\n+\n+ # this will ensure deepspeed gradient_accumulation matches user's accumulation\n+ self.micro_steps += 1\n+\n+\n+class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):\n+ \"\"\"\n+ Internal wrapper around a deepspeed optimizer.\n+\n+ Args:\n+ optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n+ The optimizer to wrap.\n+ \"\"\"\n+\n+ def __init__(self, optimizer, model: DeepSpeedEngineWrapper):\n+ super().__init__(optimizer, device_placement=False, scaler=None)\n+\n+ self.model = model\n+\n+ def zero_grad(self, set_to_none=None):\n+ \"\"\"`model.step()` is doing that automatically. Therefore, it's implementation is not needed\"\"\"", "from_author": false }, { "body": "Do we really need a `DeepSpeedPlugin` to be initialized if deepspeed isn't available?", "diff_hunk": "@@ -74,10 +84,18 @@ def __init__(\n split_batches: bool = False,\n fp16: bool = None,\n cpu: bool = False,\n+ deepspeed_plugin: DeepSpeedPlugin = None,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n- self.state = AcceleratorState(fp16=fp16, cpu=cpu, _from_accelerator=True)\n+ if deepspeed_plugin is None:\n+ deepspeed_plugin = DeepSpeedPlugin() # init from env variables\n+ else:\n+ assert isinstance(\n+ deepspeed_plugin, DeepSpeedPlugin\n+ ), \"`deepspeed_plugin` must be a DeepSpeedPlugin object.\"", "from_author": false }, { "body": "```suggestion\r\n \"Do you want to use DeepSpeed (yes/no)? [no]\",\r\n```", "diff_hunk": "@@ -50,6 +50,39 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n+\n+ deepspeed_config = None\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n+ use_deepspeed = _ask_field(\n+ \"Do you want to use DeepSpeed (yes/no)? [NO]\",", "from_author": false }, { "body": "DeepSpeed needs to know the batch size of the data even if we build dataloader by ourself. & we are extracting batch size from dataloader internally. But whenever 2 dataloaders are passed in `accelerator.prepare()` (i.e. both train & eval), we can't directly take the batch size from `train_dataloader`, since internally we don't know which is train & which is eval (because of the way accelerator.prepare() takes in args). So this argument will help users to decide what is train batch size & what is eval.", "diff_hunk": "@@ -0,0 +1,150 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from dataclasses import dataclass, field\n+\n+from deepspeed import DeepSpeedEngine\n+\n+from .optimizer import AcceleratedOptimizer\n+from .state import is_apex_available\n+\n+\n+if is_apex_available():\n+ import amp\n+\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(\n+ default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n+ )\n+ zero_stage: int = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n+ )", "from_author": true }, { "body": "I will try to find other solution.", "diff_hunk": "@@ -0,0 +1,150 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+from dataclasses import dataclass, field\n+\n+from deepspeed import DeepSpeedEngine\n+\n+from .optimizer import AcceleratedOptimizer\n+from .state import is_apex_available\n+\n+\n+if is_apex_available():\n+ import amp\n+\n+\n+@dataclass\n+class DeepSpeedPlugin:\n+\n+ gradient_accumulation_steps: int = field(\n+ default=None, metadata={\"help\": \"Number of steps to accumulate gradients before updating optimizer states\"}\n+ )\n+ zero_stage: int = field(\n+ default=None,\n+ metadata={\"help\": \"Possible options are 0,1,2,3; Default will be taken from environment variable\"},\n+ )\n+ is_train_batch_min: str = field(\n+ default=True,\n+ metadata={\"help\": \"If both train & eval dataloaders are specified, this will decide the train_batch_size\"},\n+ )\n+\n+ auto_opt_mapping: bool = field(\n+ default=True,\n+ metadata={\"help\": \"whether to map torch.adam to deepspeed optimizer version of adam based on config\"},\n+ )\n+\n+ offload_optimizer_device: bool = field(default=None, metadata={\"help\": \"Possible options are none|cpu|nvme\"})\n+\n+ fp16: bool = field(repr=False, default=None, metadata={\"help\": \"You need not define this here\"})", "from_author": true }, { "body": "I think you probably want a space at the end of this to be consistent with the other _ask_fields", "diff_hunk": "@@ -50,6 +50,39 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? 
\",\n lambda x: int(x),\n )\n+\n+ deepspeed_config = None\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n+ use_deepspeed = _ask_field(\n+ \"Do you want to use DeepSpeed (yes/no)? [NO]\",", "from_author": false }, { "body": "Should actually be \r\n```\r\n \"Do you want to use DeepSpeed? [yes/NO]: \",\r\n```\r\nto be compatible with what is actually in the lib for now.", "diff_hunk": "@@ -50,6 +50,39 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n+\n+ deepspeed_config = None\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n+ use_deepspeed = _ask_field(\n+ \"Do you want to use DeepSpeed (yes/no)? [NO]\",", "from_author": false }, { "body": "```suggestion\r\n \"Where to offload optimizer states? [NONE/cpu/nvme]: \",\r\n```\r\n\r\nAlso to have the same kind of syntax as the others.", "diff_hunk": "@@ -50,6 +50,39 @@ def get_cluster_input():\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n+\n+ deepspeed_config = None\n+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:\n+ use_deepspeed = _ask_field(\n+ \"Do you want to use DeepSpeed (yes/no)? [NO]\",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ if use_deepspeed:\n+ distributed_type = DistributedType.DEEPSPEED\n+\n+ deepspeed_config = {}\n+ if distributed_type == DistributedType.DEEPSPEED:\n+ deepspeed_config[\"zero_stage\"] = _ask_field(\n+ \"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: \",\n+ lambda x: int(x),\n+ default=2,\n+ )\n+\n+ if deepspeed_config[\"zero_stage\"] >= 2:\n+ deepspeed_config[\"offload_optimizer_device\"] = _ask_field(\n+ \"Where to offload optimizer states (none|cpu|nvme)? [none]: \",", "from_author": false }, { "body": "```suggestion\r\nπŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. to use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your python script, we provide you the `DeepSpeedPlugin`.\r\n```", "diff_hunk": "@@ -160,6 +160,26 @@ Once you have MPI setup on your cluster, just run:\n mpirun -np 2 python examples/nlp_example.py\n ```\n \n+## Launching training using DeepSpeed\n+\n+πŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. For using deepspeed, you don't need to change anything. You can set everything using just `accelerate config`. However, if you desire to tweak your deepspeed related args from your python script, we provide you the `DeepSpeedPlugin`.", "from_author": false }, { "body": "```suggestion\r\n# Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed\r\n```", "diff_hunk": "@@ -160,6 +160,26 @@ Once you have MPI setup on your cluster, just run:\n mpirun -np 2 python examples/nlp_example.py\n ```\n \n+## Launching training using DeepSpeed\n+\n+πŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. For using deepspeed, you don't need to change anything. You can set everything using just `accelerate config`. 
However, if you desire to tweak your deepspeed related args from your python script, we provide you the `DeepSpeedPlugin`.\n+\n+```python\n+from accelerator import Accelerator, DeepSpeedPlugin\n+\n+# deepspeed needs to know your gradient accumulation steps before hand, so don't forget to pass it\n+# Remember you need to do gradient accumulation by yourself, just like you would have done without deepspeed", "from_author": false }, { "body": "```suggestion\r\nNote: DeepSpeed support is experimental for now. In case you get into some problem, please open an issue.\r\n```", "diff_hunk": "@@ -160,6 +160,26 @@ Once you have MPI setup on your cluster, just run:\n mpirun -np 2 python examples/nlp_example.py\n ```\n \n+## Launching training using DeepSpeed\n+\n+πŸ€— Accelerate supports training on single/multiple GPUs using DeepSpeed. For using deepspeed, you don't need to change anything. You can set everything using just `accelerate config`. However, if you desire to tweak your deepspeed related args from your python script, we provide you the `DeepSpeedPlugin`.\n+\n+```python\n+from accelerator import Accelerator, DeepSpeedPlugin\n+\n+# deepspeed needs to know your gradient accumulation steps before hand, so don't forget to pass it\n+# Remember you need to do gradient accumulation by yourself, just like you would have done without deepspeed\n+deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)\n+accelerator = Accelerator(fp16=True, deepspeed_plugin=deepspeed_plugin)\n+\n+# How to save your πŸ€— Transformer?\n+accelerator.wait_for_everyone()\n+unwrapped_model = accelerator.unwrap_model(model)\n+unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model))\n+```\n+\n+Note: DeepSpeed support is experimental for now. 
In case you get into some issue, please raise an issue.", "from_author": false } ], "context": [ { "body": "Note for before merging this: the PR should be rebased on master and the reference to the model added in the `Accelerator` should be cleaned up in the method introduced in the PR above.", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/82", "pr_id": 643935829 }, { "diff": "diff --git a/README.md b/README.md\nindex 9c044ce9e..4f6cf5f40 100644\n--- a/README.md\n+++ b/README.md\n@@ -63,12 +63,12 @@ Here is an example:\n + device = accelerator.device\n \n model = torch.nn.Transformer().to(device)\n- optim = torch.optim.Adam(model.parameters())\n+ optimizer = torch.optim.Adam(model.parameters())\n \n dataset = load_dataset('my_dataset')\n data = torch.utils.data.DataLoader(dataset, shuffle=True)\n \n-+ model, optim, data = accelerator.prepare(model, optim, data)\n++ model, optimizer, data = accelerator.prepare(model, optimizer, data)\n \n model.train()\n for epoch in range(10):\n@@ -104,12 +104,12 @@ In particular, the same code can then be run without modification on your local\n \n - model = torch.nn.Transformer().to(device)\n + model = torch.nn.Transformer()\n- optim = torch.optim.Adam(model.parameters())\n+ optimizer = torch.optim.Adam(model.parameters())\n \n dataset = load_dataset('my_dataset')\n data = torch.utils.data.DataLoader(dataset, shuffle=True)\n \n-+ model, optim, data = accelerator.prepare(model, optim, data)\n++ model, optimizer, data = accelerator.prepare(model, optimizer, data)\n \n model.train()\n for epoch in range(10):\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/81", "pr_id": 643899032 }, { "diff": "diff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 0e115bdd4..8cc993b59 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -39,7 +39,8 @@\n \n \n def load_config_from_file(config_file):\n- config_file = config_file if config_file is not None else default_config_file\n+ config_file_exists = config_file is not None and os.path.isfile(config_file)\n+ config_file = config_file if config_file_exists else default_config_file\n with open(config_file, \"r\", encoding=\"utf-8\") as f:\n if config_file.endswith(\".json\"):\n if (\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/79", "pr_id": 641198528 }, { "diff": "diff --git a/src/accelerate/notebook_launcher.py b/src/accelerate/notebook_launcher.py\nindex 3a9b03e28..4c785b12f 100644\n--- a/src/accelerate/notebook_launcher.py\n+++ b/src/accelerate/notebook_launcher.py\n@@ -106,7 +106,7 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n launcher = PrepareForLaunch(function, distributed_type=\"MULTI_GPU\")\n try:\n print(f\"Launching a training on {num_processes} GPUs.\")\n- start_processes(launcher, nprocs=num_processes, start_method=\"fork\")\n+ start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n finally:\n # Clean up the environment variables set.\n del os.environ[\"WORLD_SIZE\"]\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/78", "pr_id": 640586316 }, { "diff": "diff --git a/src/accelerate/notebook_launcher.py b/src/accelerate/notebook_launcher.py\nindex 57f253267..3a9b03e28 100644\n--- 
a/src/accelerate/notebook_launcher.py\n+++ b/src/accelerate/notebook_launcher.py\n@@ -42,10 +42,10 @@ def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use\n The port to use to communicate between processes when launching a multi-GPU training.\n \"\"\"\n # Are we in a google colab or a Kaggle Kernel?\n- if \"IPython\" in sys.modules:\n- in_colab_or_kaggle = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n- elif any(key.startswith(\"KAGGLE\") for key in os.environ.keys()):\n+ if any(key.startswith(\"KAGGLE\") for key in os.environ.keys()):\n in_colab_or_kaggle = True\n+ elif \"IPython\" in sys.modules:\n+ in_colab_or_kaggle = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n else:\n in_colab_or_kaggle = False\n \n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/73", "pr_id": 637599010 }, { "diff": "diff --git a/examples/README.md b/examples/README.md\nindex 434a230ea..4d941b5d3 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -110,7 +110,7 @@ The same script can be run in any of the following configurations:\n Prior to running it you should install timm and torchvision:\n \n ```bash\n-pip install timm, torchvision\n+pip install timm torchvision\n ```\n \n and you should download the data with the following commands:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/70", "pr_id": 634579457 }, { "diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 63b53eccb..1dd9f506a 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -17,6 +17,7 @@\n from packaging import version\n \n from .state import AcceleratorState, DistributedType, is_tpu_available\n+from .utils import honor_type\n \n \n if is_tpu_available():\n@@ -25,7 +26,7 @@\n \n def move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n- return type(state)(move_to_device(t, device) for t in state)\n+ return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 9e38cbad9..7302ead15 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -99,6 +99,17 @@ def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Opti\n synchronize_rng_state(RNGType(rng_type), generator=generator)\n \n \n+def honor_type(obj, generator):\n+ \"\"\"\n+ Cast a generator to the same type as obj (list, tuple or namedtuple)\n+ \"\"\"\n+ # There is no direct check whether an object if of type namedtuple sadly, this is a workaround.\n+ if isinstance(obj, tuple) and hasattr(obj, \"_fields\"):\n+ # Can instantiate a namedtuple from a generator directly, contrary to a tuple/list.\n+ return type(obj)(*list(generator))\n+ return type(obj)(generator)\n+\n+\n def send_to_device(tensor, device):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.\n@@ -113,7 +124,7 @@ def send_to_device(tensor, device):\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n \"\"\"\n if isinstance(tensor, (list, tuple)):\n- return type(tensor)(send_to_device(t, device) for t in tensor)\n+ return honor_type(tensor, (send_to_device(t, device) for t in tensor))\n elif isinstance(tensor, dict):\n 
return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})\n elif not hasattr(tensor, \"to\"):\n@@ -138,7 +149,7 @@ def extract_model_from_parallel(model):\n \n def _tpu_gather(tensor, name=\"tensor\"):\n if isinstance(tensor, (list, tuple)):\n- return type(tensor)(_tpu_gather(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor))\n+ return honor_type(tensor, (_tpu_gather(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor)))\n elif isinstance(tensor, dict):\n return type(tensor)({k: _tpu_gather(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n@@ -148,7 +159,7 @@ def _tpu_gather(tensor, name=\"tensor\"):\n \n def _gpu_gather(tensor):\n if isinstance(tensor, (list, tuple)):\n- return type(tensor)(_gpu_gather(t) for t in tensor)\n+ return honor_type(tensor, (_gpu_gather(t) for t in tensor))\n elif isinstance(tensor, dict):\n return type(tensor)({k: _gpu_gather(v) for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n@@ -198,7 +209,7 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n Whether to pad at the beginning or the end.\n \"\"\"\n if isinstance(tensor, (list, tuple)):\n- return type(tensor)(pad_across_processes(t, dim=dim, pad_index=pad_index) for t in tensor)\n+ return honor_type(tensor, (pad_across_processes(t, dim=dim, pad_index=pad_index) for t in tensor))\n elif isinstance(tensor, dict):\n return type(tensor)({k: pad_across_processes(v, dim=dim, pad_index=pad_index) for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nnew file mode 100644\nindex 000000000..ca617634d\n--- /dev/null\n+++ b/tests/test_utils.py\n@@ -0,0 +1,53 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+from collections import namedtuple\n+\n+import torch\n+\n+from accelerate.utils import send_to_device\n+\n+\n+TestNamedTuple = namedtuple(\"TestNamedTuple\", \"a b\")\n+\n+\n+class UtilsTester(unittest.TestCase):\n+ def test_send_to_device(self):\n+ tensor = torch.randn(5, 2)\n+ device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n+\n+ result1 = send_to_device(tensor, device)\n+ self.assertTrue(torch.equal(result1.cpu(), tensor))\n+\n+ result2 = send_to_device((tensor, [tensor, tensor]), device)\n+ self.assertIsInstance(result2, tuple)\n+ self.assertTrue(torch.equal(result2[0].cpu(), tensor))\n+ self.assertIsInstance(result2[1], list)\n+ self.assertTrue(torch.equal(result2[1][0].cpu(), tensor))\n+ self.assertTrue(torch.equal(result2[1][1].cpu(), tensor))\n+\n+ result2 = send_to_device({\"a\": tensor, \"b\": [tensor, tensor]}, device)\n+ self.assertIsInstance(result2, dict)\n+ self.assertTrue(torch.equal(result2[\"a\"].cpu(), tensor))\n+ self.assertIsInstance(result2[\"b\"], list)\n+ self.assertTrue(torch.equal(result2[\"b\"][0].cpu(), tensor))\n+ self.assertTrue(torch.equal(result2[\"b\"][1].cpu(), tensor))\n+\n+ result3 = send_to_device(TestNamedTuple(a=tensor, b=[tensor, tensor]), device)\n+ self.assertIsInstance(result3, TestNamedTuple)\n+ self.assertTrue(torch.equal(result3.a.cpu(), tensor))\n+ self.assertIsInstance(result3.b, list)\n+ self.assertTrue(torch.equal(result3.b[0].cpu(), tensor))\n+ self.assertTrue(torch.equal(result3.b[1].cpu(), tensor))\n", "code_comments": [ { "body": "Great tests :)", "diff_hunk": "@@ -0,0 +1,53 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+from collections import namedtuple\n+\n+import torch\n+\n+from accelerate.utils import send_to_device\n+\n+\n+TestNamedTuple = namedtuple(\"TestNamedTuple\", \"a b\")\n+\n+\n+class UtilsTester(unittest.TestCase):", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/67", "pr_id": 630984689 }, { "diff": "diff --git a/README.md b/README.md\nindex 6e2211143..9c044ce9e 100644\n--- a/README.md\n+++ b/README.md\n@@ -151,6 +151,15 @@ For instance, here is how you would run the GLUE example on the MRPC task (from\n accelerate launch examples/nlp_example.py\n ```\n \n+## Launching multi-CPU run using MPI\n+\n+πŸ€— Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.\n+Once you have MPI setup on your cluster, just run:\n+\n+```bash\n+mpirun -np 2 python examples/nlp_example.py\n+```\n+\n ## Launching your training from a notebook\n \n πŸ€— Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch a distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function` then in your last cell, add:\n@@ -188,6 +197,8 @@ pip install accelerate\n ## Supported integrations\n \n - CPU only\n+- multi-CPU on one node (machine)\n+- multi-CPU on several nodes (machines)\n - single GPU\n - multi-GPU on one node (machine)\n - multi-GPU on several nodes (machines)\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 0143ac949..c7706d390 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -223,6 +223,9 @@ def prepare_model(self, model):\n output_device=self.local_process_index,\n **kwargs,\n )\n+ elif self.distributed_type == DistributedType.MULTI_CPU:\n+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n+ model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n return model\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex c8bf1bcd7..ebc176089 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -22,16 +22,16 @@\n \n def get_cluster_input():\n distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): \",\n+ \"Which type of machine are you using? 
([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): \",\n _convert_distributed_mode,\n- error_message=\"Please enter 0, 1 or 2.\",\n+ error_message=\"Please enter 0, 1, 2 or 3.\",\n )\n \n machine_rank = 0\n num_machines = 1\n main_process_ip = None\n main_process_port = None\n- if distributed_type == DistributedType.MULTI_GPU:\n+ if distributed_type == DistributedType.MULTI_GPU or distributed_type == DistributedType.MULTI_CPU:\n num_machines = _ask_field(\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n lambda x: int(x),\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nindex dd14965f7..2bc0e7ec9 100644\n--- a/src/accelerate/commands/config/config_utils.py\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -37,7 +37,7 @@ def _convert_compute_environment(value):\n \n def _convert_distributed_mode(value):\n value = int(value)\n- return DistributedType([\"NO\", \"MULTI_GPU\", \"TPU\"][value])\n+ return DistributedType([\"NO\", \"MULTI_CPU\", \"MULTI_GPU\", \"TPU\"][value])\n \n \n def _convert_sagemaker_distributed_mode(value):\ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 042a6bdde..76376038b 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -19,6 +19,14 @@\n import torch\n \n \n+try:\n+ import torch_ccl # noqa: F401\n+\n+ _ccl_available = True\n+except ImportError:\n+ _ccl_available = False\n+\n+\n try:\n import torch_xla.core.xla_model as xm\n \n@@ -27,6 +35,19 @@\n _tpu_available = False\n \n \n+def get_int_from_env(env_keys, default):\n+ \"\"\"Returns the first positive env value found in the `env_keys` list or the default.\"\"\"\n+ for e in env_keys:\n+ val = int(os.environ.get(e, -1))\n+ if val >= 0:\n+ return val\n+ return default\n+\n+\n+def is_ccl_available():\n+ return _ccl_available\n+\n+\n def is_tpu_available():\n return _tpu_available\n \n@@ -43,12 +64,14 @@ class DistributedType(str, Enum):\n Values:\n \n - **NO** -- Not a distributed environment, just a single process.\n+ - **MULTI_CPU** -- Distributed on multiple CPU nodes.\n - **MULTI_GPU** -- Distributed on multiple GPUs.\n - **TPU** -- Distributed on TPUs.\n \"\"\"\n \n # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.\n NO = \"NO\"\n+ MULTI_CPU = \"MULTI_CPU\"\n MULTI_GPU = \"MULTI_GPU\"\n TPU = \"TPU\"\n \n@@ -107,6 +130,7 @@ class AcceleratorState:\n def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool = False):\n self.__dict__ = self._shared_state\n if not getattr(self, \"initialized\", False):\n+ self.backend = None\n if not _from_accelerator:\n raise ValueError(\n \"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` \"\n@@ -123,12 +147,50 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n torch.distributed.init_process_group(backend=\"nccl\")\n+ self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ elif get_int_from_env([\"PMI_SIZE\", 
\"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"], 1) > 1:\n+ self.distributed_type = DistributedType.MULTI_CPU\n+ if is_ccl_available() and get_int_from_env([\"CCL_WORKER_COUNT\"], 0) > 0:\n+ backend = \"ccl\"\n+ elif torch.distributed.is_mpi_available():\n+ backend = \"mpi\"\n+ else:\n+ backend = \"gloo\"\n+ # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH\n+ rank = get_int_from_env([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n+ size = get_int_from_env([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n+ local_rank = get_int_from_env(\n+ [\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0\n+ )\n+ local_size = get_int_from_env(\n+ [\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n+ )\n+ self.local_process_index = local_rank\n+ os.environ[\"RANK\"] = str(rank)\n+ os.environ[\"WORLD_SIZE\"] = str(size)\n+ os.environ[\"LOCAL_RANK\"] = str(local_rank)\n+ if not os.environ.get(\"MASTER_PORT\", None):\n+ os.environ[\"MASTER_PORT\"] = \"29500\"\n+ if not os.environ.get(\"MASTER_ADDR\", None):\n+ if local_size != size and backend != \"mpi\":\n+ raise ValueError(\n+ \"Looks like distributed multinode run but MASTER_ADDR env not set, \"\n+ \"please try exporting rank 0's hostname as MASTER_ADDR\"\n+ )\n+ if not torch.distributed.is_initialized():\n+ torch.distributed.init_process_group(backend, rank=rank, world_size=size)\n+ self.backend = backend\n+ self.num_processes = torch.distributed.get_world_size()\n+ self.process_index = torch.distributed.get_rank()\n+ self.local_process_index = local_rank\n+ self.device = torch.device(\"cpu\")\n+ self.use_fp16 = False\n else:\n self.distributed_type = DistributedType.NO\n self.num_processes = 1\n@@ -139,7 +201,7 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n \n def __repr__(self):\n return (\n- f\"Distributed environment: {self.distributed_type}\\n\"\n+ f\"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\\n\"\n f\"Num processes: {self.num_processes}\\n\"\n f\"Process index: {self.process_index}\\n\"\n f\"Local process index: {self.local_process_index}\\n\"\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 5dfe85e54..9e38cbad9 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -80,6 +80,8 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\n rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n rng_state = rng_state.cpu()\n+ elif state.distributed_type == DistributedType.MULTI_CPU:\n+ torch.distributed.broadcast(rng_state, 0)\n \n # Set the broadcast rng state\n if rng_type == RNGType.TORCH:\n@@ -156,6 +158,9 @@ def _gpu_gather(tensor):\n return torch.cat(output_tensors, dim=0)\n \n \n+_cpu_gather = _gpu_gather\n+\n+\n def gather(tensor):\n \"\"\"\n Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n@@ -171,6 +176,8 @@ def gather(tensor):\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n return _gpu_gather(tensor)\n+ elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n+ return _cpu_gather(tensor)\n else:\n return tensor\n \n@@ -230,7 +237,10 @@ def 
wait_for_everyone():\n \n Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n \"\"\"\n- if AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n+ if (\n+ AcceleratorState().distributed_type == DistributedType.MULTI_GPU\n+ or AcceleratorState().distributed_type == DistributedType.MULTI_CPU\n+ ):\n torch.distributed.barrier()\n elif AcceleratorState().distributed_type == DistributedType.TPU:\n xm.rendezvous(\"accelerate.utils.wait_for_everyone\")\n@@ -266,7 +276,7 @@ def __init__(self, launcher, distributed_type=\"NO\"):\n self.distributed_type = DistributedType(distributed_type)\n \n def __call__(self, index, *args):\n- if self.distributed_type == DistributedType.MULTI_GPU:\n+ if self.distributed_type == DistributedType.MULTI_GPU or self.distributed_type == DistributedType.MULTI_CPU:\n # Prepare the environment for torch.distributed\n os.environ[\"LOCAL_RANK\"] = str(index)\n os.environ[\"RANK\"] = str(index)\n", "code_comments": [ { "body": "```suggestion\r\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\r\n```\r\nThis can probably fit in one line.", "diff_hunk": "@@ -223,6 +223,12 @@ def prepare_model(self, model):\n output_device=self.local_process_index,\n **kwargs,\n )\n+ elif self.distributed_type == DistributedType.MULTI_CPU:\n+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n+ model = torch.nn.parallel.DistributedDataParallel(\n+ model,\n+ **kwargs,\n+ )", "from_author": false }, { "body": "```suggestion\r\n \"Which type of machine are you using? ([0] No distributed training, [1] multinode-CPU, [2] multi-GPU, [3] TPU): \",\r\n```\r\nThis is not multi-CPU on one machine, it's multiple machines for CPU, so I think we should be more explicit than multi-CPU", "diff_hunk": "@@ -22,16 +22,16 @@\n \n def get_cluster_input():\n distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): \",\n+ \"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): \",", "from_author": false }, { "body": "The name is a bit too short, and we don't use 2 like this in this repo (for instance we have `send_to_device` not `send2device`). It also returns the first match What about `get_int_from_env`?\r\n\r\n`env_list` is also not the right name for the argument as we don't pass a list of envs, but a lsit of names to query in the env, so I would use `env_keys` instead of `env_list`. 
`default` should not have a default value since there is not a canonical one (the function is used with -1 once, 0 twice and 1 twice below).\r\n\r\nLastly, a short docstring that explains what this function does (returning the first positive env found in the list or the default) would be awesome!", "diff_hunk": "@@ -27,6 +35,18 @@\n _tpu_available = False\n \n \n+def env2int(env_list, default=-1):", "from_author": false }, { "body": "I think the value should also be `MULTI_NODE_CPU`.", "diff_hunk": "@@ -37,7 +37,7 @@ def _convert_compute_environment(value):\n \n def _convert_distributed_mode(value):\n value = int(value)\n- return DistributedType([\"NO\", \"MULTI_GPU\", \"TPU\"][value])\n+ return DistributedType([\"NO\", \"MULTI_CPU\", \"MULTI_GPU\", \"TPU\"][value])", "from_author": false }, { "body": "```suggestion\r\n if not os.environ.get(\"MASTER_PORT\", None):\r\n os.environ[\"MASTER_PORT\"] = \"29500\"\r\n```", "diff_hunk": "@@ -123,12 +146,43 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n torch.distributed.init_process_group(backend=\"nccl\")\n+ self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ elif env2int([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"]) > 1:\n+ self.distributed_type = DistributedType.MULTI_CPU\n+ if is_ccl_available() and env2int([\"CCL_WORKER_COUNT\"]) > 0:\n+ backend = \"ccl\"\n+ elif torch.distributed.is_mpi_available():\n+ backend = \"mpi\"\n+ else:\n+ backend = \"gloo\"\n+ rank = env2int([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n+ size = env2int([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n+ local_rank = env2int([\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0)\n+ local_size = env2int([\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1)\n+ self.local_process_index = local_rank\n+ os.environ[\"RANK\"] = str(rank)\n+ os.environ[\"WORLD_SIZE\"] = str(size)\n+ if not os.environ.get(\"MASTER_PORT\", None): os.environ[\"MASTER_PORT\"] = \"29500\"", "from_author": false }, { "body": "I thin we should raise a ValueError here telling the user to pass a `MASTER_ADDR` instead of relying on a default.", "diff_hunk": "@@ -123,12 +146,43 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n torch.distributed.init_process_group(backend=\"nccl\")\n+ self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ elif env2int([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"]) > 1:\n+ self.distributed_type = 
DistributedType.MULTI_CPU\n+ if is_ccl_available() and env2int([\"CCL_WORKER_COUNT\"]) > 0:\n+ backend = \"ccl\"\n+ elif torch.distributed.is_mpi_available():\n+ backend = \"mpi\"\n+ else:\n+ backend = \"gloo\"\n+ rank = env2int([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n+ size = env2int([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n+ local_rank = env2int([\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0)\n+ local_size = env2int([\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1)\n+ self.local_process_index = local_rank\n+ os.environ[\"RANK\"] = str(rank)\n+ os.environ[\"WORLD_SIZE\"] = str(size)\n+ if not os.environ.get(\"MASTER_PORT\", None): os.environ[\"MASTER_PORT\"] = \"29500\"\n+ if not os.environ.get(\"MASTER_ADDR\", None):\n+ if local_size != size and backend != \"mpi\":\n+ print(\"Warning: Looks like distributed multinode run but MASTER_ADDR env not set, using '127.0.0.1' as default\")\n+ print(\"If this run hangs, try exporting rank 0's hostname as MASTER_ADDR\")", "from_author": false }, { "body": "This one is not used anywhere, so I would remove it.", "diff_hunk": "@@ -123,12 +146,43 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n torch.distributed.init_process_group(backend=\"nccl\")\n+ self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ elif env2int([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"]) > 1:\n+ self.distributed_type = DistributedType.MULTI_CPU\n+ if is_ccl_available() and env2int([\"CCL_WORKER_COUNT\"]) > 0:\n+ backend = \"ccl\"\n+ elif torch.distributed.is_mpi_available():\n+ backend = \"mpi\"\n+ else:\n+ backend = \"gloo\"\n+ rank = env2int([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n+ size = env2int([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n+ local_rank = env2int([\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0)\n+ local_size = env2int([\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1)\n+ self.local_process_index = local_rank\n+ os.environ[\"RANK\"] = str(rank)\n+ os.environ[\"WORLD_SIZE\"] = str(size)\n+ if not os.environ.get(\"MASTER_PORT\", None): os.environ[\"MASTER_PORT\"] = \"29500\"\n+ if not os.environ.get(\"MASTER_ADDR\", None):\n+ if local_size != size and backend != \"mpi\":\n+ print(\"Warning: Looks like distributed multinode run but MASTER_ADDR env not set, using '127.0.0.1' as default\")\n+ print(\"If this run hangs, try exporting rank 0's hostname as MASTER_ADDR\")\n+ os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ if not torch.distributed.is_initialized():\n+ torch.distributed.init_process_group(backend, rank=rank, world_size=size)\n+ self.backend = backend\n+ self.num_processes = torch.distributed.get_world_size()\n+ self.process_index = torch.distributed.get_rank()\n+ self.local_process_index = 
local_rank\n+ self.local_num_processes = local_size", "from_author": false }, { "body": "We should only display the backend if it's not None.", "diff_hunk": "@@ -139,7 +193,7 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n \n def __repr__(self):\n return (\n- f\"Distributed environment: {self.distributed_type}\\n\"\n+ f\"Distributed environment: {self.distributed_type} Backend: {self.backend}\\n\"", "from_author": false }, { "body": "Well, actually we can have multi-cpu on a single machine. The typical use case is one process (or rank as it is called in MPI world) per socket. So, on a dual socket machine, we can have two ranks per node. So, I suggest keeping it as multi-CPU.", "diff_hunk": "@@ -22,16 +22,16 @@\n \n def get_cluster_input():\n distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): \",\n+ \"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): \",", "from_author": true }, { "body": "Some MPI-like custom backends can initialize without MASTER_ADDR so I just put a warning. I can change to ValueError if you think that's a better choice.", "diff_hunk": "@@ -123,12 +146,43 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n torch.distributed.init_process_group(backend=\"nccl\")\n+ self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ elif env2int([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"]) > 1:\n+ self.distributed_type = DistributedType.MULTI_CPU\n+ if is_ccl_available() and env2int([\"CCL_WORKER_COUNT\"]) > 0:\n+ backend = \"ccl\"\n+ elif torch.distributed.is_mpi_available():\n+ backend = \"mpi\"\n+ else:\n+ backend = \"gloo\"\n+ rank = env2int([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n+ size = env2int([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n+ local_rank = env2int([\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0)\n+ local_size = env2int([\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1)\n+ self.local_process_index = local_rank\n+ os.environ[\"RANK\"] = str(rank)\n+ os.environ[\"WORLD_SIZE\"] = str(size)\n+ if not os.environ.get(\"MASTER_PORT\", None): os.environ[\"MASTER_PORT\"] = \"29500\"\n+ if not os.environ.get(\"MASTER_ADDR\", None):\n+ if local_size != size and backend != \"mpi\":\n+ print(\"Warning: Looks like distributed multinode run but MASTER_ADDR env not set, using '127.0.0.1' as default\")\n+ print(\"If this run hangs, try exporting rank 0's hostname as MASTER_ADDR\")", "from_author": true }, { "body": "We are in a test where `backend != \"mpi\"`, so I would adapt the error message (to say the backend needs a MASTER_ADDR) and raise an error. 
For MPI, it will still work without the MASTER_ADDR.", "diff_hunk": "@@ -123,12 +146,43 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n torch.distributed.init_process_group(backend=\"nccl\")\n+ self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ elif env2int([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"]) > 1:\n+ self.distributed_type = DistributedType.MULTI_CPU\n+ if is_ccl_available() and env2int([\"CCL_WORKER_COUNT\"]) > 0:\n+ backend = \"ccl\"\n+ elif torch.distributed.is_mpi_available():\n+ backend = \"mpi\"\n+ else:\n+ backend = \"gloo\"\n+ rank = env2int([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n+ size = env2int([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n+ local_rank = env2int([\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0)\n+ local_size = env2int([\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1)\n+ self.local_process_index = local_rank\n+ os.environ[\"RANK\"] = str(rank)\n+ os.environ[\"WORLD_SIZE\"] = str(size)\n+ if not os.environ.get(\"MASTER_PORT\", None): os.environ[\"MASTER_PORT\"] = \"29500\"\n+ if not os.environ.get(\"MASTER_ADDR\", None):\n+ if local_size != size and backend != \"mpi\":\n+ print(\"Warning: Looks like distributed multinode run but MASTER_ADDR env not set, using '127.0.0.1' as default\")\n+ print(\"If this run hangs, try exporting rank 0's hostname as MASTER_ADDR\")", "from_author": false }, { "body": "Made the change.", "diff_hunk": "@@ -123,12 +146,43 @@ def __init__(self, fp16: bool = None, cpu: bool = False, _from_accelerator: bool\n self.distributed_type = DistributedType.MULTI_GPU\n if not torch.distributed.is_initialized():\n torch.distributed.init_process_group(backend=\"nccl\")\n+ self.backend = \"nccl\"\n self.num_processes = torch.distributed.get_world_size()\n self.process_index = torch.distributed.get_rank()\n self.local_process_index = int(os.environ.get(\"LOCAL_RANK\", -1))\n self.device = torch.device(\"cuda\", self.local_process_index)\n torch.cuda.set_device(self.device)\n self.use_fp16 = parse_flag_from_env(\"USE_FP16\", False) if fp16 is None else fp16\n+ elif env2int([\"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\", \"WORLD_SIZE\"]) > 1:\n+ self.distributed_type = DistributedType.MULTI_CPU\n+ if is_ccl_available() and env2int([\"CCL_WORKER_COUNT\"]) > 0:\n+ backend = \"ccl\"\n+ elif torch.distributed.is_mpi_available():\n+ backend = \"mpi\"\n+ else:\n+ backend = \"gloo\"\n+ rank = env2int([\"RANK\", \"PMI_RANK\", \"OMPI_COMM_WORLD_RANK\", \"MV2_COMM_WORLD_RANK\"], 0)\n+ size = env2int([\"WORLD_SIZE\", \"PMI_SIZE\", \"OMPI_COMM_WORLD_SIZE\", \"MV2_COMM_WORLD_SIZE\"], 1)\n+ local_rank = env2int([\"LOCAL_RANK\", \"MPI_LOCALRANKID\", \"OMPI_COMM_WORLD_LOCAL_RANK\", \"MV2_COMM_WORLD_LOCAL_RANK\"], 0)\n+ local_size = env2int([\"MPI_LOCALNRANKS\", \"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1)\n+ self.local_process_index = 
local_rank\n+ os.environ[\"RANK\"] = str(rank)\n+ os.environ[\"WORLD_SIZE\"] = str(size)\n+ if not os.environ.get(\"MASTER_PORT\", None): os.environ[\"MASTER_PORT\"] = \"29500\"\n+ if not os.environ.get(\"MASTER_ADDR\", None):\n+ if local_size != size and backend != \"mpi\":\n+ print(\"Warning: Looks like distributed multinode run but MASTER_ADDR env not set, using '127.0.0.1' as default\")\n+ print(\"If this run hangs, try exporting rank 0's hostname as MASTER_ADDR\")", "from_author": true }, { "body": "Does this require an install of some library? Let's add how to!", "diff_hunk": "@@ -151,6 +151,15 @@ For instance, here is how you would run the GLUE example on the MRPC task (from\n accelerate launch examples/nlp_example.py\n ```\n \n+## Launching multi-CPU run using MPI\n+\n+πŸ€— Here is another way to launch multi-CPU run using MPI\n+On your cluster just run:\n+\n+```bash\n+mpirun -np 2 python examples/nlp_example.py", "from_author": false }, { "body": "```suggestion\r\n \"\"\"Returns the first positive env value found in the `env_keys` list or the default.\"\"\"\r\n```", "diff_hunk": "@@ -27,6 +35,19 @@\n _tpu_available = False\n \n \n+def get_int_from_env(env_keys, default):\n+ \"\"\"Returns the first positive env found in the `env_keys` list or the default.\"\"\"", "from_author": false }, { "body": "There are multiple options e.g. [Intel MPI](https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/mpi-library.html#gs.0hn0hn), [OpenMPI](https://www.open-mpi.org/) or MVAPICH. I assume, typical HPC clusters would have some version available or users can install them in userspace easily. example instructions for Open-MPI are [here](https://www.open-mpi.org/faq/?category=building#easy-build).", "diff_hunk": "@@ -151,6 +151,15 @@ For instance, here is how you would run the GLUE example on the MRPC task (from\n accelerate launch examples/nlp_example.py\n ```\n \n+## Launching multi-CPU run using MPI\n+\n+πŸ€— Here is another way to launch multi-CPU run using MPI\n+On your cluster just run:\n+\n+```bash\n+mpirun -np 2 python examples/nlp_example.py", "from_author": true }, { "body": "Ok, added a line on how to get OpenMPI...", "diff_hunk": "@@ -151,6 +151,15 @@ For instance, here is how you would run the GLUE example on the MRPC task (from\n accelerate launch examples/nlp_example.py\n ```\n \n+## Launching multi-CPU run using MPI\n+\n+πŸ€— Here is another way to launch multi-CPU run using MPI\n+On your cluster just run:\n+\n+```bash\n+mpirun -np 2 python examples/nlp_example.py", "from_author": true }, { "body": "```suggestion\r\n import torch_ccl\r\n```\r\nI think this should take care of the problem", "diff_hunk": "@@ -19,6 +19,14 @@\n import torch\n \n \n+try:\n+ import torch_ccl as ccl", "from_author": false } ], "context": [ { "body": "I have made the suggested changes. Please take a look.\r\nThanks.", "from_author": true }, { "body": "Last step, could you run `make style` on your branch? This should take care of the failing test.\r\nLet me know if run into any trouble and I can push it on the PR instead.", "from_author": false }, { "body": "Sorry, my system doesn't have black. Make gave error.\r\n\r\n```\r\nblack tests src examples\r\nmake: black: Command not found\r\nmake: *** [style] Error 127\r\n```\r\nCan you please go ahead and push it to PR?\r\nThanks.", "from_author": true }, { "body": "Just installed black and fixed formatting... 
It is still giving an error about torch_ccl import but I don't know how to fix it...", "from_author": true }, { "body": "Fixed issue with flake8 and squashed to single commit... Should be good to merge now.", "from_author": true }, { "body": "Thanks again for all your work on this!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/63", "pr_id": 628807140 }, { "diff": "diff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 7a8422a05..44909ab3c 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -364,7 +364,8 @@ def prepare_data_loader(\n )\n \n new_dataset = dataloader.dataset\n- new_batch_sampler = dataloader.batch_sampler\n+ # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n+ new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n generator = getattr(dataloader, \"generator\", None)\n # No change if no multiprocess\n if num_processes != 1:\n@@ -413,6 +414,11 @@ def prepare_data_loader(\n for k in _PYTORCH_DATALOADER_KWARGS\n if k not in ignore_kwargs\n }\n+\n+ # Need to provide batch_size as batch_sampler is None for Iterable dataset\n+ if new_batch_sampler is None:\n+ kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\n+\n return DataLoaderShard(\n new_dataset,\n device=device if put_on_device else None,\n", "code_comments": [ { "body": "```suggestion\r\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\r\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\r\n```\r\nLet's group the two lines together.", "diff_hunk": "@@ -365,6 +365,9 @@ def prepare_data_loader(\n \n new_dataset = dataloader.dataset\n new_batch_sampler = dataloader.batch_sampler\n+ if isinstance(new_dataset, IterableDataset):\n+ # Iterable dataset doesn't like batch_sampler, data_loader creates a default one for it\n+ new_batch_sampler = None", "from_author": false }, { "body": "```suggestion\r\n # Need to provide batch_size as batch_sampler is None for Iterable dataset\r\n if new_batch_sampler is None:\r\n kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size\r\n```", "diff_hunk": "@@ -413,6 +416,10 @@ def prepare_data_loader(\n for k in _PYTORCH_DATALOADER_KWARGS\n if k not in ignore_kwargs\n }\n+\n+ # Need to provide batch_size as batch_sampler is None for Iterable dataset\n+ if new_batch_sampler is None: kwargs[\"batch_size\"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/62", "pr_id": 628803524 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex f1242d3fd..44cff4d55 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -137,6 +137,7 @@ def multi_gpu_launcher(args):\n )\n else:\n cmd.extend([\"--nproc_per_node\", str(args.num_processes)])\n+ cmd.extend([\"--master_port\", str(args.main_process_port)])\n cmd.append(args.training_script)\n cmd.extend(args.training_script_args)\n \n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/58", "pr_id": 625298184 }, { "diff": "diff --git a/README.md b/README.md\nindex 280473ee5..6e2211143 
100644\n--- a/README.md\n+++ b/README.md\n@@ -163,8 +163,6 @@ notebook_launcher(training_function)\n \n An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb)\n \n-Note that this launcher does not work in Jupyter Notebook on a machine with multiple GPUs (yet). This feature will be released in a later version.\n-\n ## Why should I use πŸ€— Accelerate?\n \n You should use πŸ€— Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library, In fact the whole API of πŸ€— Accelerate is in one class, the `Accelerator` object.\ndiff --git a/docs/source/launcher.rst b/docs/source/launcher.rst\nindex da28ad920..1c2279946 100644\n--- a/docs/source/launcher.rst\n+++ b/docs/source/launcher.rst\n@@ -15,16 +15,15 @@ Notebook Launcher\n =======================================================================================================================\n \n Launch your training function inside a notebook. Currently supports launching a training with TPUs on [Google\n-Colab](https://colab.research.google.com/) and [Kaggle kernels](https://www.kaggle.com/code), or training on one GPU,\n-but support for training on several GPUs (if the machine on which you are running your notebook has them) is planned\n-for a future release.\n+Colab](https://colab.research.google.com/) and [Kaggle kernels](https://www.kaggle.com/code), as well as training on\n+several GPUs (if the machine on which you are running your notebook has them).\n \n An example can be found in `this notebook\n <https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb>`__.\n \n .. warning::\n \n- If you are training on Colab or a Kaggle kernel with TPUs, your :obj:`Accelerator` object should only be defined\n- inside the training function. This is because the initialization should be done inside the launcher only.\n+ Your :obj:`Accelerator` object should only be defined inside the training function. This is because the\n+ initialization should be done inside the launcher only.\n \n .. autofunction:: accelerate.notebook_launcher\ndiff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex d9846b777..1e28fed85 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -199,9 +199,8 @@ Launching training from a notebook\n -----------------------------------------------------------------------------------------------------------------------\n \n In Accelerate 0.3.0, a new :class:`~accelerate.notebook_launcher` has been introduced to help you launch your training\n-function from a notebook. Currently supports launching a training with TPUs on Colab, or training on one GPU, but\n-support for training on several GPUs (if the machine on which you are running your notebook has them) is planned for a\n-future release.\n+function from a notebook. 
This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training\n+on several GPUs (if the machine on which you are running your notebook has them).\n \n Just define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a\n cell with the following code:\n@@ -214,8 +213,8 @@ cell with the following code:\n \n .. warning::\n \n- If you are training on Colab with TPUs, your :obj:`Accelerator` object should only be defined inside the training\n- function. This is because the initialization should be done inside the launcher only.\n+ Your :obj:`Accelerator` object should only be defined inside the training function. This is because the\n+ initialization should be done inside the launcher only.\n \n \n Training on TPU\ndiff --git a/src/accelerate/notebook_launcher.py b/src/accelerate/notebook_launcher.py\nindex e9dce7677..57f253267 100644\n--- a/src/accelerate/notebook_launcher.py\n+++ b/src/accelerate/notebook_launcher.py\n@@ -14,15 +14,15 @@\n \n import os\n import sys\n-import warnings\n \n import torch\n+from torch.multiprocessing import start_processes\n \n from .state import AcceleratorState\n from .utils import PrepareForLaunch\n \n \n-def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\", **kwargs):\n+def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, use_port=\"29500\"):\n \"\"\"\n Launches a training function, using several processes if it's possible in the current environment (TPU with\n multiple cores for instance).\n@@ -32,17 +32,15 @@ def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\"\n The training function to execute. If it accepts arguments, the first argument should be the index of the\n process run.\n args (:obj:`Tuple`):\n- Tuple of arguments to pass to the function (it will receive :obj:`(index, *args)`).\n+ Tuple of arguments to pass to the function (it will receive :obj:`*args`).\n num_processes (:obj:`int`, `optional`):\n The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to\n the number of GPUs available otherwise.\n-\n- .. 
warning::\n-\n- Multiple GPUs is not yet supported.\n+ use_fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ If :obj:`True`, will use mixed precision training on multi-GPU.\n+ use_port (:obj:`str`, `optional`, defaults to :obj:`\"29500\"`):\n+ The port to use to communicate between processes when launching a multi-GPU training.\n \"\"\"\n- launcher = PrepareForLaunch(function)\n-\n # Are we in a google colab or a Kaggle Kernel?\n if \"IPython\" in sys.modules:\n in_colab_or_kaggle = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n@@ -65,15 +63,60 @@ def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\"\n if num_processes is None:\n num_processes = 8\n \n+ launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n+ print(f\"Launching a training on {num_processes} TPU cores.\")\n xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n- launcher(0, *args)\n+ if torch.cuda.is_available():\n+ print(\"Launching training on one GPU.\")\n+ else:\n+ print(\"Launching training on CPU.\")\n+ function(*args)\n \n else:\n if num_processes is None:\n- num_processes = torch.cuda.device_count() if torch.cuda.is_available() else 1\n+ raise ValueError(\n+ \"You have to specify the number of GPUs you would like to use, add `num_process=...` to your call.\"\n+ )\n \n if num_processes > 1:\n- warnings.warn(\"`notebook_launcher` does not support multiple GPUs yet, launching the training on one GPU.\")\n- launcher(0, *args)\n+ # Multi-GPU launch\n+ if len(AcceleratorState._shared_state) > 0:\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized \"\n+ \"inside your training function. Restart your notebook and make sure no cells initializes an \"\n+ \"`Accelerator`.\"\n+ )\n+\n+ if torch.cuda.is_initialized():\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, you need to avoid running any instruction \"\n+ \"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA \"\n+ \"function.\"\n+ )\n+\n+ # torch.distributed will expect a few environment variable to be here. 
We set the ones common to each\n+ # process here (the other ones will be set be the launcher).\n+ os.environ[\"WORLD_SIZE\"] = str(num_processes)\n+ os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ os.environ[\"MASTER_PORT\"] = str(use_port)\n+ os.environ[\"USE_FP16\"] = str(use_fp16)\n+\n+ launcher = PrepareForLaunch(function, distributed_type=\"MULTI_GPU\")\n+ try:\n+ print(f\"Launching a training on {num_processes} GPUs.\")\n+ start_processes(launcher, nprocs=num_processes, start_method=\"fork\")\n+ finally:\n+ # Clean up the environment variables set.\n+ del os.environ[\"WORLD_SIZE\"]\n+ del os.environ[\"MASTER_ADDR\"]\n+ del os.environ[\"MASTER_PORT\"]\n+\n+ else:\n+ # No need for a distributed launch otherwise as it's either CPU or one GPU.\n+ if torch.cuda.is_available():\n+ print(\"Launching training on one GPU.\")\n+ else:\n+ print(\"Launching training on CPU.\")\n+ function(*args)\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 0c2282b2c..5dfe85e54 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n import importlib\n-import inspect\n+import os\n import random\n from enum import Enum\n from typing import List, Optional, Union\n@@ -252,15 +252,23 @@ def save(obj, f):\n \n class PrepareForLaunch:\n \"\"\"\n- Prepare a function that launches a script.\n+ Prepare a function that will launched in a distributed setup.\n+\n+ Args:\n+ launcher (:obj:`Callable`):\n+ The function to launch.\n+ distributed_type (:class:`~accelerate.state.DistributedType`):\n+ The distributed type to prepare for.\n \"\"\"\n \n- def __init__(self, launcher):\n+ def __init__(self, launcher, distributed_type=\"NO\"):\n self.launcher = launcher\n+ self.distributed_type = DistributedType(distributed_type)\n+\n+ def __call__(self, index, *args):\n+ if self.distributed_type == DistributedType.MULTI_GPU:\n+ # Prepare the environment for torch.distributed\n+ os.environ[\"LOCAL_RANK\"] = str(index)\n+ os.environ[\"RANK\"] = str(index)\n \n- def __call__(self, index):\n- launcher_sig = inspect.signature(self.launcher)\n- if len(launcher_sig.parameters) == 0:\n- self.launcher()\n- else:\n- self.launcher(index)\n+ self.launcher(*args)\n", "code_comments": [ { "body": "I am not that experienced with `torch.distributed`, but is it good to have them hardcoded and not overrideable? We could add parameters to adjust them. 
\r\n", "diff_hunk": "@@ -65,15 +61,60 @@ def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\"\n if num_processes is None:\n num_processes = 8\n \n+ launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n+ print(f\"Launching a training on {num_processes} TPU cores.\")\n xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n- launcher(0, *args)\n+ if torch.cuda.is_available():\n+ print(\"Launching training on one GPU.\")\n+ else:\n+ print(\"Launching training on CPU.\")\n+ function(*args)\n \n else:\n if num_processes is None:\n- num_processes = torch.cuda.device_count() if torch.cuda.is_available() else 1\n+ raise ValueError(\n+ \"You have to specify the number of GPUs you would like to use, add `num_process=...` to your call.\"\n+ )\n \n if num_processes > 1:\n- warnings.warn(\"`notebook_launcher` does not support multiple GPUs yet, launching the training on one GPU.\")\n- launcher(0, *args)\n+ # Multi-GPU launch\n+ if len(AcceleratorState._shared_state) > 0:\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized \"\n+ \"inside your training function. Restart your notebook and make sure no cells initializes an \"\n+ \"`Accelerator`.\"\n+ )\n+\n+ if torch.cuda.is_initialized():\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, you need to avoid running any instruction \"\n+ \"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA \"\n+ \"function.\"\n+ )\n+\n+ # torch.distributed will expect a few environment variable to be here. We set the ones common to each\n+ # process here (the other ones will be set be the launcher).\n+ os.environ[\"WORLD_SIZE\"] = str(num_processes)\n+ os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ os.environ[\"MASTER_PORT\"] = \"29500\"", "from_author": false }, { "body": "Those are the default values in `torch.distributed.launch`. The master address should not change since this does not support multi-node training. I can add a kwarg for the master port.", "diff_hunk": "@@ -65,15 +61,60 @@ def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\"\n if num_processes is None:\n num_processes = 8\n \n+ launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n+ print(f\"Launching a training on {num_processes} TPU cores.\")\n xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n- launcher(0, *args)\n+ if torch.cuda.is_available():\n+ print(\"Launching training on one GPU.\")\n+ else:\n+ print(\"Launching training on CPU.\")\n+ function(*args)\n \n else:\n if num_processes is None:\n- num_processes = torch.cuda.device_count() if torch.cuda.is_available() else 1\n+ raise ValueError(\n+ \"You have to specify the number of GPUs you would like to use, add `num_process=...` to your call.\"\n+ )\n \n if num_processes > 1:\n- warnings.warn(\"`notebook_launcher` does not support multiple GPUs yet, launching the training on one GPU.\")\n- launcher(0, *args)\n+ # Multi-GPU launch\n+ if len(AcceleratorState._shared_state) > 0:\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized \"\n+ \"inside your training function. 
Restart your notebook and make sure no cells initializes an \"\n+ \"`Accelerator`.\"\n+ )\n+\n+ if torch.cuda.is_initialized():\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, you need to avoid running any instruction \"\n+ \"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA \"\n+ \"function.\"\n+ )\n+\n+ # torch.distributed will expect a few environment variable to be here. We set the ones common to each\n+ # process here (the other ones will be set be the launcher).\n+ os.environ[\"WORLD_SIZE\"] = str(num_processes)\n+ os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ os.environ[\"MASTER_PORT\"] = \"29500\"", "from_author": true }, { "body": "Yeah saw the default [here](https://github.com/pytorch/pytorch/blob/master/torch/distributed/elastic_launch.py#L394). But who knows what setup customers are using, e.g. IPV6, custom cnames etc. ", "diff_hunk": "@@ -65,15 +61,60 @@ def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\"\n if num_processes is None:\n num_processes = 8\n \n+ launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n+ print(f\"Launching a training on {num_processes} TPU cores.\")\n xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n- launcher(0, *args)\n+ if torch.cuda.is_available():\n+ print(\"Launching training on one GPU.\")\n+ else:\n+ print(\"Launching training on CPU.\")\n+ function(*args)\n \n else:\n if num_processes is None:\n- num_processes = torch.cuda.device_count() if torch.cuda.is_available() else 1\n+ raise ValueError(\n+ \"You have to specify the number of GPUs you would like to use, add `num_process=...` to your call.\"\n+ )\n \n if num_processes > 1:\n- warnings.warn(\"`notebook_launcher` does not support multiple GPUs yet, launching the training on one GPU.\")\n- launcher(0, *args)\n+ # Multi-GPU launch\n+ if len(AcceleratorState._shared_state) > 0:\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized \"\n+ \"inside your training function. Restart your notebook and make sure no cells initializes an \"\n+ \"`Accelerator`.\"\n+ )\n+\n+ if torch.cuda.is_initialized():\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, you need to avoid running any instruction \"\n+ \"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA \"\n+ \"function.\"\n+ )\n+\n+ # torch.distributed will expect a few environment variable to be here. 
We set the ones common to each\n+ # process here (the other ones will be set be the launcher).\n+ os.environ[\"WORLD_SIZE\"] = str(num_processes)\n+ os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ os.environ[\"MASTER_PORT\"] = \"29500\"", "from_author": false }, { "body": "```suggestion\r\n os.environ[\"MASTER_PORT\"] = str(use_port)\r\n```\r\nWould add the extra conversion, might be else error prone", "diff_hunk": "@@ -65,15 +63,60 @@ def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\"\n if num_processes is None:\n num_processes = 8\n \n+ launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n+ print(f\"Launching a training on {num_processes} TPU cores.\")\n xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n- launcher(0, *args)\n+ if torch.cuda.is_available():\n+ print(\"Launching training on one GPU.\")\n+ else:\n+ print(\"Launching training on CPU.\")\n+ function(*args)\n \n else:\n if num_processes is None:\n- num_processes = torch.cuda.device_count() if torch.cuda.is_available() else 1\n+ raise ValueError(\n+ \"You have to specify the number of GPUs you would like to use, add `num_process=...` to your call.\"\n+ )\n \n if num_processes > 1:\n- warnings.warn(\"`notebook_launcher` does not support multiple GPUs yet, launching the training on one GPU.\")\n- launcher(0, *args)\n+ # Multi-GPU launch\n+ if len(AcceleratorState._shared_state) > 0:\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized \"\n+ \"inside your training function. Restart your notebook and make sure no cells initializes an \"\n+ \"`Accelerator`.\"\n+ )\n+\n+ if torch.cuda.is_initialized():\n+ raise ValueError(\n+ \"To launch a multi-GPU training from your notebook, you need to avoid running any instruction \"\n+ \"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA \"\n+ \"function.\"\n+ )\n+\n+ # torch.distributed will expect a few environment variable to be here. 
We set the ones common to each\n+ # process here (the other ones will be set be the launcher).\n+ os.environ[\"WORLD_SIZE\"] = str(num_processes)\n+ os.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\n+ os.environ[\"MASTER_PORT\"] = use_port", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/56", "pr_id": 624442154 }, { "diff": "diff --git a/src/accelerate/commands/test.py b/src/accelerate/commands/test.py\nindex 1940c5039..ec6d2524b 100644\n--- a/src/accelerate/commands/test.py\n+++ b/src/accelerate/commands/test.py\n@@ -46,7 +46,7 @@ def test_command(args):\n script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + [\"test_utils\", \"test_script.py\"])\n \n test_args = f\"\"\"\n- {script_name} --config_file={args.config_file}\n+ --config_file={args.config_file} {script_name}\n \"\"\".split()\n cmd = [\"accelerate-launch\"] + test_args\n result = execute_subprocess_async(cmd, env=os.environ.copy())\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/54", "pr_id": 623905518 }, { "diff": "diff --git a/README.md b/README.md\nindex 25bc92c13..280473ee5 100644\n--- a/README.md\n+++ b/README.md\n@@ -151,6 +151,20 @@ For instance, here is how you would run the GLUE example on the MRPC task (from\n accelerate launch examples/nlp_example.py\n ```\n \n+## Launching your training from a notebook\n+\n+πŸ€— Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch a distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function` then in your last cell, add:\n+\n+```python\n+from accelerate import notebook_launcher\n+\n+notebook_launcher(training_function)\n+```\n+\n+An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb)\n+\n+Note that this launcher does not work in Jupyter Notebook on a machine with multiple GPUs (yet). This feature will be released in a later version.\n+\n ## Why should I use πŸ€— Accelerate?\n \n You should use πŸ€— Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library, In fact the whole API of πŸ€— Accelerate is in one class, the `Accelerator` object.\ndiff --git a/docs/source/launcher.rst b/docs/source/launcher.rst\nindex 1b0520d1b..da28ad920 100644\n--- a/docs/source/launcher.rst\n+++ b/docs/source/launcher.rst\n@@ -19,6 +19,9 @@ Colab](https://colab.research.google.com/) and [Kaggle kernels](https://www.kagg\n but support for training on several GPUs (if the machine on which you are running your notebook has them) is planned\n for a future release.\n \n+An example can be found in `this notebook\n+<https://github.com/huggingface/notebooks/blob/master/examples/accelerate/simple_nlp_example.ipynb>`__.\n+\n .. 
warning::\n \n If you are training on Colab or a Kaggle kernel with TPUs, your :obj:`Accelerator` object should only be defined\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/52", "pr_id": 623785568 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex 56ba51f64..c8bf1bcd7 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -46,7 +46,7 @@ def get_cluster_input():\n main_process_ip = _ask_field(\n \"What is the IP address of the machine that will host the main process? \",\n )\n- main_process_ip = _ask_field(\n+ main_process_port = _ask_field(\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/50", "pr_id": 623305806 }, { "diff": "diff --git a/docs/source/index.rst b/docs/source/index.rst\nindex ceef4fc91..fb58d1e04 100644\n--- a/docs/source/index.rst\n+++ b/docs/source/index.rst\n@@ -155,5 +155,6 @@ Supported integrations\n :caption: API reference\n \n accelerator\n+ launcher\n kwargs\n internal\ndiff --git a/docs/source/launcher.rst b/docs/source/launcher.rst\nnew file mode 100644\nindex 000000000..1b0520d1b\n--- /dev/null\n+++ b/docs/source/launcher.rst\n@@ -0,0 +1,27 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+\n+Notebook Launcher\n+=======================================================================================================================\n+\n+Launch your training function inside a notebook. Currently supports launching a training with TPUs on [Google\n+Colab](https://colab.research.google.com/) and [Kaggle kernels](https://www.kaggle.com/code), or training on one GPU,\n+but support for training on several GPUs (if the machine on which you are running your notebook has them) is planned\n+for a future release.\n+\n+.. warning::\n+\n+ If you are training on Colab or a Kaggle kernel with TPUs, your :obj:`Accelerator` object should only be defined\n+ inside the training function. This is because the initialization should be done inside the launcher only.\n+\n+.. 
autofunction:: accelerate.notebook_launcher\ndiff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 5e3d9ac17..d9846b777 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -195,6 +195,29 @@ If you stored the config file in a non-default location, you can indicate it to\n You can also override any of the arguments determined by your config file, see TODO: insert ref here.\n \n \n+Launching training from a notebook\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+In Accelerate 0.3.0, a new :class:`~accelerate.notebook_launcher` has been introduced to help you launch your training\n+function from a notebook. Currently supports launching a training with TPUs on Colab, or training on one GPU, but\n+support for training on several GPUs (if the machine on which you are running your notebook has them) is planned for a\n+future release.\n+\n+Just define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a\n+cell with the following code:\n+\n+.. code-block::\n+\n+ from accelerate import notebook_launcher\n+\n+ notebook_launcher(training_function)\n+\n+.. warning::\n+\n+ If you are training on Colab with TPUs, your :obj:`Accelerator` object should only be defined inside the training\n+ function. This is because the initialization should be done inside the launcher only.\n+\n+\n Training on TPU\n -----------------------------------------------------------------------------------------------------------------------\n \ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex d005e1235..8edc050c3 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -6,5 +6,6 @@\n \n from .accelerator import Accelerator\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs\n+from .notebook_launcher import notebook_launcher\n from .state import DistributedType\n from .utils import synchronize_rng_states\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 9e3e7a566..f1242d3fd 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -16,7 +16,6 @@\n \n import argparse\n import importlib\n-import inspect\n import os\n import subprocess\n import sys\n@@ -27,15 +26,7 @@\n from accelerate.commands.config import default_config_file, load_config_from_file\n from accelerate.commands.config.config_args import SageMakerConfig\n from accelerate.state import ComputeEnvironment, DistributedType\n-from accelerate.utils import is_sagemaker_available\n-\n-\n-class _AddOneArg:\n- def __init__(self, launcher):\n- self.launcher = launcher\n-\n- def __call__(self, index):\n- self.launcher()\n+from accelerate.utils import PrepareForLaunch, is_sagemaker_available\n \n \n def launch_command_parser(subparsers=None):\n@@ -176,12 +167,7 @@ def tpu_launcher(args):\n # Patch sys.argv\n sys.argv = [args.training_script] + args.training_script_args\n \n- # If the function does not take one argument, launch will fail\n- launcher_sig = inspect.signature(main_function)\n- if len(launcher_sig.parameters) == 0:\n- xmp.spawn(_AddOneArg(main_function), args=(), nprocs=args.num_processes)\n- else:\n- xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n+ xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)\n \n \n def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\ndiff --git 
a/src/accelerate/notebook_launcher.py b/src/accelerate/notebook_launcher.py\nnew file mode 100644\nindex 000000000..e9dce7677\n--- /dev/null\n+++ b/src/accelerate/notebook_launcher.py\n@@ -0,0 +1,79 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import warnings\n+\n+import torch\n+\n+from .state import AcceleratorState\n+from .utils import PrepareForLaunch\n+\n+\n+def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\", **kwargs):\n+ \"\"\"\n+ Launches a training function, using several processes if it's possible in the current environment (TPU with\n+ multiple cores for instance).\n+\n+ Args:\n+ function (:obj:`Callable`):\n+ The training function to execute. If it accepts arguments, the first argument should be the index of the\n+ process run.\n+ args (:obj:`Tuple`):\n+ Tuple of arguments to pass to the function (it will receive :obj:`(index, *args)`).\n+ num_processes (:obj:`int`, `optional`):\n+ The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to\n+ the number of GPUs available otherwise.\n+\n+ .. warning::\n+\n+ Multiple GPUs is not yet supported.\n+ \"\"\"\n+ launcher = PrepareForLaunch(function)\n+\n+ # Are we in a google colab or a Kaggle Kernel?\n+ if \"IPython\" in sys.modules:\n+ in_colab_or_kaggle = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n+ elif any(key.startswith(\"KAGGLE\") for key in os.environ.keys()):\n+ in_colab_or_kaggle = True\n+ else:\n+ in_colab_or_kaggle = False\n+\n+ if in_colab_or_kaggle:\n+ if os.environ.get(\"TPU_NAME\", None) is not None:\n+ # TPU launch\n+ import torch_xla.distributed.xla_multiprocessing as xmp\n+\n+ if len(AcceleratorState._shared_state) > 0:\n+ raise ValueError(\n+ \"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside \"\n+ \"your training function. 
Restart your notebook and make sure no cells initializes an \"\n+ \"`Accelerator`.\"\n+ )\n+ if num_processes is None:\n+ num_processes = 8\n+\n+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n+ else:\n+ # No need for a distributed launch otherwise as it's either CPU or one GPU.\n+ launcher(0, *args)\n+\n+ else:\n+ if num_processes is None:\n+ num_processes = torch.cuda.device_count() if torch.cuda.is_available() else 1\n+\n+ if num_processes > 1:\n+ warnings.warn(\"`notebook_launcher` does not support multiple GPUs yet, launching the training on one GPU.\")\n+ launcher(0, *args)\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 0ac7d04e5..0c2282b2c 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import importlib\n+import inspect\n import random\n from enum import Enum\n from typing import List, Optional, Union\n@@ -247,3 +248,19 @@ def save(obj, f):\n xm.save(obj, f)\n elif AcceleratorState().local_process_index == 0:\n torch.save(obj, f)\n+\n+\n+class PrepareForLaunch:\n+ \"\"\"\n+ Prepare a function that launches a script.\n+ \"\"\"\n+\n+ def __init__(self, launcher):\n+ self.launcher = launcher\n+\n+ def __call__(self, index):\n+ launcher_sig = inspect.signature(self.launcher)\n+ if len(launcher_sig.parameters) == 0:\n+ self.launcher()\n+ else:\n+ self.launcher(index)\n", "code_comments": [ { "body": "```suggestion\r\nLaunch your training function inside a notebook. Currently supports launching a training with TPUs on [Google Colab](https://colab.research.google.com/), or training\r\n```\r\n```suggestion\r\nLaunch your training function from a notebook. Currently supports launching a training with TPUs on Colab, or training\r\n```", "diff_hunk": "@@ -0,0 +1,26 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+\n+Notebook Launcher\n+=======================================================================================================================\n+\n+Launch your training function from a notebook. Currently supports launching a training with TPUs on Colab, or training", "from_author": false }, { "body": "We could add a check for Kaggle here too. I guess users expect the same behavior there. I checked a Kaggle TPU notebook, sadly `sys.modules[\"IPython\"].get_ipython()` is not hinting that the notebook is run on Kaggle. But we could add a check like `if any(key.startswith('KAGGLE') for key in os.environ.keys())`. 
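A small sketch of the combined detection suggested here — `in_colab_or_kaggle` is a hypothetical helper name, checking the Colab IPython shell first and falling back to Kaggle's `KAGGLE_*` environment variables:

```python
import os
import sys


def in_colab_or_kaggle():
    # Colab can be identified from the IPython shell object...
    if "IPython" in sys.modules and "google.colab" in str(sys.modules["IPython"].get_ipython()):
        return True
    # ...while Kaggle kernels are easier to spot through their KAGGLE_* environment variables.
    return any(key.startswith("KAGGLE") for key in os.environ)
```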
Additionally here is a dump of the env vars on kaggle\r\n\r\n```python\r\nenviron{'LC_ALL': 'C.UTF-8',\r\n 'LD_LIBRARY_PATH': '/opt/conda/lib',\r\n 'KAGGLE_DATA_PROXY_TOKEN': 'eyJhbGciOiJBMTI4S1ciLCJlbmMiOiJBMTI4Q0JDLUhTMjU2In0.B5s-CblnZQjaC_KXAizqBneeEiZmD-pS7Asgb2h-jS6uEkmkIIHLYg.ddiwW2QpGlaxa5Q584WQOg.E8mrT0sr5vZsIb5v4zdJf6uvrzk2wuZCaM6g-xLBTkZo_Nx-lLLqcZqUrZixTOznQqeidkc5tJibiyYBeBGcRnwSuFXx0w4V0EJPfKVqp_B5TMbOROvjnLIpX3eoINPjQd7I38nmCeyrlGUnC-aTHLFqDAxaaYmcESykEixt_Hlc7Q_DfiD_uPkEzJs8buJBvcYGFq8FWbJB7n89rkDY-P9xvZnSsFjbiQYtAbfwcKGdxGKYKAoVdqRhsRHjVjj6HDVQU38Ldnuhzjg2kdt8Xhu__YxSXq0Gh-RFLZxP90M.ZPazCS7UZW9RyI3-jniqPA',\r\n 'LANG': 'C.UTF-8',\r\n 'HOSTNAME': '6497510c329c',\r\n 'TESSERACT_PATH': '/usr/bin/tesseract',\r\n 'XRT_TPU_CONFIG': 'tpu_worker;0;10.0.0.2:8470',\r\n 'KAGGLE_KERNEL_INTEGRATIONS': '',\r\n 'PROJ_LIB': '/opt/conda/share/proj',\r\n 'KAGGLE_KERNEL_RUN_TYPE': 'Interactive',\r\n 'MKL_THREADING_LAYER': 'GNU',\r\n 'NO_GCE_CHECK': 'True',\r\n 'PWD': '/kaggle/working',\r\n 'HOME': '/root',\r\n 'DL_ANACONDA_HOME': '/opt/conda',\r\n 'ANACONDA_PYTHON_VERSION': '3.7',\r\n 'KAGGLE_URL_BASE': 'https://www.kaggle.com',\r\n 'KAGGLE_USER_SECRETS_TOKEN': 'eyJhbGciOiJkaXIiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2In0..kvSYrK_dnCLGYSW8GDnQdg.xlHt52wzWG2SlyvAN6zUq9M2S3m6maqeK_nGH6zCEL-VDZE-CYAC19Ecal8oy7zJQCFYcTDdFGi-Qq6c3tdXinuUy-kG1UQ8MnTSDJJT1AvIWAeQf2MNacS8TMWa7IYG.DLWrZFE6rlAGB7rKowpPKQ',\r\n 'KAGGLE_DOCKER_IMAGE': 'gcr.io/kaggle-images/python:tpu',\r\n 'CONTAINER_NAME': 'base-cpu',\r\n 'KAGGLE_DATA_PROXY_PROJECT': 'kaggle-161607',\r\n 'SHELL': '/bin/bash',\r\n 'MPLBACKEND': 'agg',\r\n 'SHLVL': '1',\r\n 'PYTHONPATH': '/kaggle/lib/kagglegym:/kaggle/lib',\r\n 'CONTAINER_URL': 'gcr.io/deeplearning-platform-release/base-cpu:nightly-2021-03-28',\r\n 'KAGGLE_CONTAINER_NAME': 'kaggle_gq54lOn7oztYNC879IY4P2ItqdI6zTyAKVlolN6KbYE-60764729-webtier',\r\n 'PYTHONUSERBASE': '/root/.local',\r\n 'PATH': '/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',\r\n 'KAGGLE_DATA_PROXY_URL': 'https://dp.kaggle.net',\r\n 'TPU_NAME': 'grpc://10.0.0.2:8470',\r\n '_': '/opt/conda/bin/jupyter',\r\n 'GIT_PYTHON_REFRESH': 'quiet',\r\n 'JPY_PARENT_PID': '9',\r\n 'TERM': 'xterm-color',\r\n 'CLICOLOR': '1',\r\n 'PAGER': 'cat',\r\n 'GIT_PAGER': 'cat'}\r\n``` \r\n\r\nAdditionally, if we add Kaggle we should rename `in_colab` to maybe `in_colab_or_kaggle`.", "diff_hunk": "@@ -0,0 +1,80 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import warnings\n+\n+import torch\n+\n+from .state import AcceleratorState\n+from .utils import PrepareForLaunch\n+\n+\n+def notebook_launcher(function, args=(), num_processes=None, start_method=\"fork\", **kwargs):\n+ \"\"\"\n+ Launches a training function, using several processes if it's possible in the current environment (TPU with\n+ multiple cores for instance).\n+\n+ Args:\n+ function (:obj:`Callable`):\n+ The training function to execute. 
If it accepts arguments, the first argument should be the index of the\n+ process run.\n+ args (:obj:`Tuple`):\n+ Tuple of arguments to pass to the function (it will receive :obj:`(index, *args)`).\n+ num_processes (:obj:`int`, `optional`):\n+ The number of processes to use for training. Will default to 8 in Colab if a TPU is available, to the\n+ number of GPUs available otherwise.\n+ start_method (:obj:`str`, `optional`, defaults to :obj:`\"fork\"`):\n+ The method to use to start the various processes\n+ kwargs:\n+ Additional keyword arguments passed along to the multiprocess :obj:`spawn` method.\n+\n+ .. warning::\n+\n+ Multiple GPUs is not yet supported.\n+ \"\"\"\n+ launcher = PrepareForLaunch(function)\n+\n+ # Are we in a google colab?\n+ if \"IPython\" in sys.modules:\n+ in_colab = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n+ else:", "from_author": false } ], "context": [ { "body": "Yes, notebook examples will follow! Working on it this afternoon and hoping to have most NLP tasks by the end of the week!", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/44", "pr_id": 622263859 }, { "diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 55d00ab5d..63b53eccb 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -14,6 +14,8 @@\n \n import torch\n \n+from packaging import version\n+\n from .state import AcceleratorState, DistributedType, is_tpu_available\n \n \n@@ -86,8 +88,18 @@ def load_state_dict(self, state_dict):\n def state_dict(self):\n return self.optimizer.state_dict()\n \n- def zero_grad(self):\n- self.optimizer.zero_grad()\n+ def zero_grad(self, set_to_none=None):\n+ if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n+ if set_to_none is not None:\n+ raise ValueError(\n+ \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n+ f\"earlier versions (found version {torch.__version__}).\"\n+ )\n+ self.optimizer.zero_grad()\n+ else:\n+ if set_to_none is not None:\n+ set_to_none = False\n+ self.optimizer.zero_grad(set_to_none=set_to_none)\n \n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n", "code_comments": [ { "body": "if set_to_none is None:", "diff_hunk": "@@ -86,8 +88,18 @@ def load_state_dict(self, state_dict):\n def state_dict(self):\n return self.optimizer.state_dict()\n \n- def zero_grad(self):\n- self.optimizer.zero_grad()\n+ def zero_grad(self, set_to_none=None):\n+ if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n+ if set_to_none is not None:\n+ raise ValueError(\n+ \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n+ f\"earlier versions (found version {torch.__version__}).\"\n+ )\n+ self.optimizer.zero_grad()\n+ else:\n+ if set_to_none is not None:", "from_author": false }, { "body": "Good catch!", "diff_hunk": "@@ -86,8 +88,18 @@ def load_state_dict(self, state_dict):\n def state_dict(self):\n return self.optimizer.state_dict()\n \n- def zero_grad(self):\n- self.optimizer.zero_grad()\n+ def zero_grad(self, set_to_none=None):\n+ if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n+ if set_to_none is not None:\n+ raise ValueError(\n+ \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n+ f\"earlier versions (found version {torch.__version__}).\"\n+ )\n+ self.optimizer.zero_grad()\n+ else:\n+ if set_to_none is not None:", "from_author": true } ], 
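Putting the optimizer change just above together with the reviewer's `is None` fix, here is a standalone sketch of the version gate (written as a plain function rather than a method, with `packaging` as the only extra dependency):

```python
import torch
from packaging import version


def zero_grad(optimizer, set_to_none=None):
    # `set_to_none` only exists on Optimizer.zero_grad from PyTorch 1.7.0 onwards.
    if version.parse(torch.__version__) < version.parse("1.7.0"):
        if set_to_none is not None:
            raise ValueError(
                "`set_to_none` for `Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and "
                f"can't be used for earlier versions (found version {torch.__version__})."
            )
        optimizer.zero_grad()
    else:
        # Reviewer's fix applied: only fill in a default when the caller did not pass a value.
        if set_to_none is None:
            set_to_none = False
        optimizer.zero_grad(set_to_none=set_to_none)
```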
"context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/43", "pr_id": 622247621 }, { "diff": "diff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 382636253..903f2b0e0 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -66,10 +66,10 @@ def launch_command_parser(subparsers=None):\n \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\n )\n parser.add_argument(\n- \"--num_machines\", type=int, default=1, help=\"The total number of machines used in this training.\"\n+ \"--num_machines\", type=int, default=None, help=\"The total number of machines used in this training.\"\n )\n parser.add_argument(\n- \"--machine_rank\", type=int, default=0, help=\"The rank of the machine on which this script is launched.\"\n+ \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\n )\n parser.add_argument(\"--main_process_ip\", type=str, default=None, help=\"The IP address of the machine of rank 0.\")\n parser.add_argument(\n@@ -298,6 +298,16 @@ def launch_command(args):\n if not args.multi_gpu and not args.tpu:\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n+ if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n+ # Update args with the defaults\n+ for name, attr in defaults.__dict__.items():\n+ # Those args are handled separately\n+ if (\n+ name not in [\"compute_environment\", \"fp16\", \"distributed_type\"]\n+ and getattr(args, name, None) is None\n+ ):\n+ setattr(args, name, attr)\n+\n if args.num_processes is None and defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n args.num_processes = defaults.num_processes\n if not args.fp16:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/38", "pr_id": 621191010 }, { "diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nindex a9e412f7d..56ba51f64 100644\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -58,22 +58,22 @@ def get_cluster_input():\n else:\n main_training_function = \"main\"\n \n- num_processes = _ask_field(\n- \"How many processes in total will you use? [1]: \",\n- lambda x: int(x),\n- default=1,\n- error_message=\"Please enter an integer.\",\n- )\n+ num_processes = _ask_field(\n+ \"How many processes in total will you use? [1]: \",\n+ lambda x: int(x),\n+ default=1,\n+ error_message=\"Please enter an integer.\",\n+ )\n \n- if distributed_type != DistributedType.TPU:\n- fp16 = _ask_field(\n- \"Do you wish to use FP16 (mixed precision)? [yes/NO]: \",\n- _convert_yes_no_to_bool,\n- default=False,\n- error_message=\"Please enter yes or no.\",\n- )\n- else:\n- fp16 = False\n+ if distributed_type != DistributedType.TPU:\n+ fp16 = _ask_field(\n+ \"Do you wish to use FP16 (mixed precision)? 
[yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ else:\n+ fp16 = False\n \n return ClusterConfig(\n compute_environment=ComputeEnvironment.LOCAL_MACHINE,\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/35", "pr_id": 619582633 }, { "diff": "diff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 1d56f9e4b..5e3d9ac17 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -199,18 +199,18 @@ Training on TPU\n -----------------------------------------------------------------------------------------------------------------------\n \n If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs\n-will create a graph of all the operations happening im your training step (forward pass, backward pass and optimizer\n+will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer\n step). This is why your first step of training will always be very long as building and compiling this graph for\n optimizations takes some time.\n \n The good news is that this compilation will be cached so the second step and all the following will be much faster. The\n bas news is that it only applies if all of your steps do exactly the same operations, which implies:\n \n-- having all tensors of the same length in all your lenghts\n-- having static code (i.e., not a foor loop of length that could change from step to step)\n+- having all tensors of the same length in all your lengths\n+- having static code (i.e., not a for loop of length that could change from step to step)\n \n Having any of the things above change between two steps will trigger a new compilation which will, once again, take a\n-lof of time. In practice, that means you must take special care to have all your tensors in your inputs of the same\n+lot of time. In practice, that means you must take special care to have all your tensors in your inputs of the same\n shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layer with for loops that\n have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.\n \ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 75539559e..0143ac949 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -280,7 +280,7 @@ def gather(self, tensor):\n \n Args:\n tensor (:obj:`torch.Tensor`, or a nested tuple/list/dictionary of :obj:`torch.Tensor`):\n- The tensors to gather accross all processes.\n+ The tensors to gather across all processes.\n \n Returns:\n :obj:`torch.Tensor`, or a nested tuple/list/dictionary of :obj:`torch.Tensor`: The gathered tensor(s). 
Note\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 24a7ad08a..7a8422a05 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -271,7 +271,7 @@ class DataLoaderShard(DataLoader):\n - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n - :obj:`\"generator\"`: an optional :obj:`torch.Generator`\n generator (:obj:`torch.Generator`, `optional`):\n- A random number generator to keep synchronized accross processes.\n+ A random number generator to keep synchronized across processes.\n kwargs:\n All other keyword arguments to pass to the regular :obj:`DataLoader` initialization.\n \"\"\"\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/33", "pr_id": 618706134 }, { "diff": "diff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex a05debfda..0e115bdd4 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -44,7 +44,7 @@ def load_config_from_file(config_file):\n if config_file.endswith(\".json\"):\n if (\n json.load(f).get(\"compute_environment\", ComputeEnvironment.LOCAL_MACHINE)\n- is ComputeEnvironment.LOCAL_MACHINE\n+ == ComputeEnvironment.LOCAL_MACHINE\n ):\n config_class = ClusterConfig\n else:\n@@ -53,7 +53,7 @@ def load_config_from_file(config_file):\n else:\n if (\n yaml.safe_load(f).get(\"compute_environment\", ComputeEnvironment.LOCAL_MACHINE)\n- is ComputeEnvironment.LOCAL_MACHINE\n+ == ComputeEnvironment.LOCAL_MACHINE\n ):\n config_class = ClusterConfig\n else:\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/31", "pr_id": 617956141 }, { "diff": "diff --git a/examples/README.md b/examples/README.md\nindex be636e63c..434a230ea 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -28,7 +28,7 @@ pip install datasets, transformers\n \n The same script can be run in any of the following configurations:\n - single CPU or single GPU\n-- multi GPUS (using PyTorch distributed mode)\n+- multi GPUs (using PyTorch distributed mode)\n - (multi) TPUs\n - fp16 (mixed-precision) or fp32 (normal precision)\n \n@@ -57,8 +57,8 @@ To run it in each of these various modes, use the following commands:\n ```\n * from any server with Accelerate launcher\n ```bash\n- accelerate launch --fb16 ./nlp_example.py\n-- multi GPUS (using PyTorch distributed mode)\n+ accelerate launch --fp16 ./nlp_example.py\n+- multi GPUs (using PyTorch distributed mode)\n * With Accelerate config and launcher\n ```bash\n accelerate config # This will create a config file on your server\n@@ -103,7 +103,7 @@ The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a R\n \n The same script can be run in any of the following configurations:\n - single CPU or single GPU\n-- multi GPUS (using PyTorch distributed mode)\n+- multi GPUs (using PyTorch distributed mode)\n - (multi) TPUs\n - fp16 (mixed-precision) or fp32 (normal precision)\n \n@@ -145,8 +145,8 @@ To run it in each of these various modes, use the following commands:\n ```\n * from any server with Accelerate launcher\n ```bash\n- accelerate launch --fb16 ./cv_example.py --data_dir path_to_data\n-- multi GPUS (using PyTorch distributed mode)\n+ accelerate launch --fp16 ./cv_example.py --data_dir path_to_data\n+- multi GPUs (using PyTorch distributed mode)\n * With Accelerate config and launcher\n ```bash\n accelerate config # This will create a 
config file on your server\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/28", "pr_id": 617200861 }, { "diff": "diff --git a/docs/source/index.rst b/docs/source/index.rst\nindex efdeb8f88..ceef4fc91 100644\n--- a/docs/source/index.rst\n+++ b/docs/source/index.rst\n@@ -144,6 +144,11 @@ Supported integrations\n quicktour\n installation\n \n+.. toctree::\n+ :maxdepth: 2\n+ :caption: Guides\n+\n+ sagemaker\n \n .. toctree::\n :maxdepth: 2\ndiff --git a/docs/source/sagemaker.rst b/docs/source/sagemaker.rst\nnew file mode 100644\nindex 000000000..b889c99b3\n--- /dev/null\n+++ b/docs/source/sagemaker.rst\n@@ -0,0 +1,169 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+Amazon SageMaker\n+=======================================================================================================================\n+\n+Hugging Face and Amazon introduced new `Hugging Face Deep Learning Containers (DLCs)\n+<https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers>`_ to\n+make it easier than ever to train Hugging Face Transformer models in `Amazon SageMaker\n+<https://aws.amazon.com/sagemaker/>`_.\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+\n+\n+Getting Started\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+Setup & Installation\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not\n+have an AWS account yet learn more `here <https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html>`__.\n+\n+After you have your AWS Account you need to install the ``sagemaker`` sdk for πŸ€— Accelerate with.\n+\n+.. code-block:: \n+\n+ pip install \"accelerate[sagemaker]\" --upgrade\n+\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with ``transformers``, ``datasets`` and ``tokenizers`` pre-installed. πŸ€—\n+Accelerate is not in the DLC yet (will soon be added!) so to use it within Amazon SageMaker you need to create a\n+``requirements.txt`` in the same directory where your training script is located and add it as dependency.\n+\n+.. code-block:: \n+\n+ accelerate\n+\n+You should also add any other dependencies you have to this ``requirements.txt``.\n+\n+\n+Configure πŸ€— Accelerate\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with\n+the πŸ€— Accelerate CLI.\n+\n+.. code-block:: \n+\n+ accelerate config\n+ # In which compute environment are you running? 
([0] This machine, [1] AWS (Amazon SageMaker)): 1\n+\n+\n+πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.\n+\n+.. note::\n+ πŸ€— Accelerate is not saving any of your credentials.\n+\n+\n+Prepare a πŸ€— Accelerate fine-tuning script\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+The training script is very similar to a training script you might run outside of SageMaker, but to save your model\n+after training you need to specify either ``/opt/ml/model`` or use ``os.environ[\"SM_MODEL_DIR\"]`` as your save\n+directory. After training, artifacts in this directory are uploaded to S3.\n+\n+\n+.. code-block:: diff\n+\n+ - torch.save('/opt/ml/model`)\n+ + accelerator.save('/opt/ml/model')\n+\n+\n+.. warning::\n+ SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to\n+ specify type as bool in your script and provide an explicit True or False value for this hyperparameter. `[REF]\n+ <https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script>`__.\n+\n+\n+Launch Training\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+You can launch your training with πŸ€— Accelerate CLI with\n+\n+.. code-block:: \n+\n+ accelerate launch path_to_script.py --args_to_the_script\n+\n+\n+This will launch your training script using your configuration. The only thing you have to do is provide all the\n+arguments needed by your training script as named arguments.\n+\n+**Examples**\n+\n+.. note::\n+ If you run one of the example scripts, don't forget to add ``accelerator.save('/opt/ml/model')`` to it.\n+\n+.. code-block:: \n+\n+ accelerate launch ./examples/sagemaker_example.py \n+\n+\n+Outputs:\n+\n+.. code-block:: \n+\n+ Configuring Amazon SageMaker environment\n+ Converting Arguments to Hyperparameters\n+ Creating Estimator\n+ 2021-04-08 11:56:50 Starting - Starting the training job...\n+ 2021-04-08 11:57:13 Starting - Launching requested ML instancesProfilerReport-1617883008: InProgress\n+ .........\n+ 2021-04-08 11:58:54 Starting - Preparing the instances for training.........\n+ 2021-04-08 12:00:24 Downloading - Downloading input data\n+ 2021-04-08 12:00:24 Training - Downloading the training image..................\n+ 2021-04-08 12:03:39 Training - Training image download completed. 
Training in progress..\n+ ........\n+ epoch 0: {'accuracy': 0.7598039215686274, 'f1': 0.8178438661710037}\n+ epoch 1: {'accuracy': 0.8357843137254902, 'f1': 0.882249560632689}\n+ epoch 2: {'accuracy': 0.8406862745098039, 'f1': 0.8869565217391304}\n+ ........\n+ 2021-04-08 12:05:40 Uploading - Uploading generated training model\n+ 2021-04-08 12:05:40 Completed - Training job completed\n+ Training seconds: 331\n+ Billable seconds: 331\n+ You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04-08-11-56-47-108/output/model.tar.gz\n+\n+\n+\n+Advanced Features\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+Distributed Training: Data Parallelism\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+*currently in development, will be supported soon.*\n+\n+Distributed Training: Model Parallelism\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+*currently in development, will be supported soon.*\n+\n+Python packages and dependencies\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with ``transformers``, ``datasets`` and ``tokenizers`` pre-installed. If you\n+want to use different/other Python packages you can do this by adding them to the ``requirements.txt``. These packages\n+will be installed before your training script is started.\n+\n+Remote scripts: Use scripts located on Github\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+*undecided if feature is needed. Contact us if you would like this feature.*\n+\n+Use Spot Instances\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+*undecided if feature is needed. 
Contact us if you would like this feature.*\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex aad2b8b65..220c6ddcc 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -162,8 +162,8 @@ def collate_fn(examples):\n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n- parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\"--fp16\", type=bool, default=False, help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\"--cpu\", type=bool, default=False, help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\ndiff --git a/examples/requirements.txt b/examples/requirements.txt\nnew file mode 100644\nindex 000000000..65b6b04c3\n--- /dev/null\n+++ b/examples/requirements.txt\n@@ -0,0 +1 @@\n+accelerate # used to be installed in Amazon SageMaker environment\n\\ No newline at end of file\ndiff --git a/setup.py b/setup.py\nindex e2ce2cfbc..b2700c015 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n ]\n },\n python_requires=\">=3.6.0\",\n- install_requires=[\"torch>=1.4.0\"],\n+ install_requires=[\"torch>=1.4.0\", \"pyaml>=20.4.0\"],\n extras_require=extras,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex fe2cb122a..a05debfda 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -42,13 +42,19 @@ def load_config_from_file(config_file):\n config_file = config_file if config_file is not None else default_config_file\n with open(config_file, \"r\", encoding=\"utf-8\") as f:\n if config_file.endswith(\".json\"):\n- if json.load(f).get(\"compute_environment\", ComputeEnvironment.LOCAL_MACHINE):\n+ if (\n+ json.load(f).get(\"compute_environment\", ComputeEnvironment.LOCAL_MACHINE)\n+ is ComputeEnvironment.LOCAL_MACHINE\n+ ):\n config_class = ClusterConfig\n else:\n config_class = SageMakerConfig\n return config_class.from_json_file(json_file=config_file)\n else:\n- if yaml.safe_load(f).get(\"compute_environment\", ComputeEnvironment.LOCAL_MACHINE):\n+ if (\n+ yaml.safe_load(f).get(\"compute_environment\", ComputeEnvironment.LOCAL_MACHINE)\n+ is ComputeEnvironment.LOCAL_MACHINE\n+ ):\n config_class = ClusterConfig\n else:\n config_class = SageMakerConfig\n@@ -121,5 +127,5 @@ class SageMakerConfig(BaseConfig):\n region: str = \"us-east-1\"\n num_machines: int = 1\n base_job_name: str = f\"accelerate-sagemaker-{num_machines}\"\n- pytroch_version: str = \"1.6\"\n+ pytorch_version: str = \"1.6\"\n transformers_version: str = \"4.4\"\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nindex 25fd3e6cb..feff0aecf 100644\n--- a/src/accelerate/commands/config/sagemaker.py\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -97,7 +97,8 @@ def get_sagemaker_input():\n os.environ[\"AWS_PROFILE\"] = aws_profile\n else:\n print(\n- \"Accelerate will expose AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables on the machine\"\n+ \"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training 
script with,\"\n+ \"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`\"\n )\n aws_access_key_id = _ask_field(\"AWS Access Key ID: \")\n os.environ[\"AWS_ACCESS_KEY_ID\"] = aws_access_key_id\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex cd3ae65cc..382636253 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -20,10 +20,14 @@\n import os\n import subprocess\n import sys\n+from ast import literal_eval\n from pathlib import Path\n+from typing import Dict, List\n \n from accelerate.commands.config import default_config_file, load_config_from_file\n+from accelerate.commands.config.config_args import SageMakerConfig\n from accelerate.state import ComputeEnvironment, DistributedType\n+from accelerate.utils import is_sagemaker_available\n \n \n class _AddOneArg:\n@@ -80,6 +84,18 @@ def launch_command_parser(subparsers=None):\n default=None,\n help=\"The name of the main function to be executed in your script (only for TPU training).\",\n )\n+ parser.add_argument(\n+ \"--aws_access_key_id\",\n+ type=str,\n+ default=None,\n+ help=\"The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\",\n+ )\n+ parser.add_argument(\n+ \"--aws_secret_access_key\",\n+ type=str,\n+ default=None,\n+ help=\"The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job\",\n+ )\n parser.add_argument(\n \"training_script\",\n type=str,\n@@ -168,11 +184,107 @@ def tpu_launcher(args):\n xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n \n \n-def sagemaker_launcher(sagemaker_config, args):\n- raise NotImplementedError(\n- \"Support for starting SageMaker training is not yet implemented. But you can create configs for it.\"\n+def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\n+ if len(nargs) < 0:\n+ return {}\n+ # helper function to infer type for argsparser\n+\n+ def _infer_type(s):\n+ try:\n+ s = float(s)\n+\n+ if s // 1 == s:\n+ return int(s)\n+ return s\n+ except ValueError:\n+ return s\n+\n+ parser = argparse.ArgumentParser()\n+ _, unknown = parser.parse_known_args(nargs)\n+ for index, argument in enumerate(unknown):\n+ if argument.startswith((\"-\", \"--\")):\n+ action = None\n+ if index + 1 < len(unknown): # checks if next index would be in list\n+ if unknown[index + 1].startswith((\"-\", \"--\")): # checks if next element is an key\n+ # raise an error if element is store_true or store_false\n+ raise ValueError(\n+ \"SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types\"\n+ )\n+ else: # raise an error if last element is store_true or store_false\n+ raise ValueError(\n+ \"SageMaker doesn’t support argparse actions for `store_true` or `store_false`. 
Please define explicit types\"\n+ )\n+ # adds argument to parser based on action_store true\n+ if action is None:\n+ parser.add_argument(argument, type=_infer_type)\n+ else:\n+ parser.add_argument(argument, action=action)\n+\n+ return {\n+ key: (literal_eval(value) if value == \"True\" or value == \"False\" else value)\n+ for key, value in parser.parse_args(nargs).__dict__.items()\n+ }\n+\n+\n+def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n+ if not is_sagemaker_available():\n+ raise ImportError(\n+ \"Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`\"\n+ )\n+ from sagemaker.huggingface import HuggingFace\n+\n+ # configure environment\n+ print(\"Configuring Amazon SageMaker environment\")\n+ os.environ[\"AWS_DEFAULT_REGION\"] = sagemaker_config.region\n+\n+ # configure credentials\n+ if sagemaker_config.profile is not None:\n+ os.environ[\"AWS_PROFILE\"] = sagemaker_config.profile\n+ elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None:\n+ os.environ[\"AWS_ACCESS_KEY_ID\"] = args.aws_access_key_id\n+ os.environ[\"AWS_SECRET_ACCESS_KEY\"] = args.aws_secret_access_key\n+ else:\n+ raise EnvironmentError(\n+ \"You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile\"\n+ )\n+\n+ # extract needed arguments\n+ source_dir = os.path.dirname(args.training_script)\n+ if not source_dir: # checks if string is empty\n+ source_dir = \".\"\n+ entry_point = os.path.basename(args.training_script)\n+ if not entry_point.endswith(\".py\"):\n+ raise ValueError(f'Your training script should be a python script and not \"{entry_point}\"')\n+\n+ print(\"Converting Arguments to Hyperparameters\")\n+ hyperparameters = _convert_nargs_to_dict(args.training_script_args)\n+\n+ environment = {\"USE_FP16\": args.fp16} # Environment variables to be set for use during training job\n+\n+ # configure distribution set up\n+ distribution = None # TODO: not yet implemented\n+\n+ # configure session\n+ print(\"Creating Estimator\")\n+ huggingface_estimator = HuggingFace(\n+ entry_point=entry_point,\n+ source_dir=source_dir,\n+ role=sagemaker_config.iam_role_name,\n+ transformers_version=\"4.4\",\n+ pytorch_version=\"1.6\",\n+ py_version=\"py36\",\n+ base_job_name=sagemaker_config.base_job_name,\n+ instance_count=sagemaker_config.num_machines,\n+ instance_type=sagemaker_config.ec2_instance_type,\n+ debugger_hook_config=False,\n+ distribution=distribution,\n+ hyperparameters=hyperparameters,\n+ environment=environment,\n )\n \n+ huggingface_estimator.fit()\n+ print(f\"You can find your model data at: {huggingface_estimator.model_data}\")\n+\n \n def launch_command(args):\n # Sanity checks\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 21b635996..68299c790 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -31,6 +31,10 @@ def is_boto3_available():\n return importlib.util.find_spec(\"boto3\") is not None\n \n \n+def is_sagemaker_available():\n+ return importlib.util.find_spec(\"sagemaker\") is not None\n+\n+\n class RNGType(Enum):\n TORCH = \"torch\"\n CUDA = \"cuda\"\ndiff --git a/tests/test_sagemaker.py b/tests/test_sagemaker.py\nnew file mode 100644\nindex 000000000..f2d587a39\n--- /dev/null\n+++ b/tests/test_sagemaker.py\n@@ -0,0 +1,62 @@\n+import unittest\n+from dataclasses import dataclass\n+\n+import pytest\n+from accelerate.commands.config.config_args import SageMakerConfig\n+from accelerate.commands.launch import 
_convert_nargs_to_dict\n+from accelerate.state import ComputeEnvironment\n+\n+\n+@dataclass\n+class MockLaunchConfig(SageMakerConfig):\n+ compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER\n+ fp16 = True\n+ ec2_instance_type = \"ml.p3.2xlarge\"\n+ iam_role_name = \"accelerate_sagemaker_execution_role\"\n+ profile = \"hf-sm\"\n+ region = \"us-east-1\"\n+ num_machines = 1\n+ base_job_name = \"accelerate-sagemaker-1\"\n+ pytorch_version = \"1.6\"\n+ transformers_version = \"4.4\"\n+ training_script = \"train.py\"\n+ success_training_script_args = [\n+ \"--model_name_or_path\",\n+ \"bert\",\n+ \"--do_train\",\n+ \"False\",\n+ \"--epochs\",\n+ \"3\",\n+ \"--learning_rate\",\n+ \"5e-5\",\n+ \"--max_steps\",\n+ \"50.5\",\n+ ]\n+ fail_training_script_args = [\n+ \"--model_name_or_path\",\n+ \"bert\",\n+ \"--do_train\",\n+ \"--do_test\",\n+ \"False\",\n+ \"--do_predict\",\n+ \"--epochs\",\n+ \"3\",\n+ \"--learning_rate\",\n+ \"5e-5\",\n+ \"--max_steps\",\n+ \"50.5\",\n+ ]\n+\n+\n+class SageMakerLaunch(unittest.TestCase):\n+ def test_args_convert(self):\n+ # If no defaults are changed, `to_kwargs` returns an empty dict.\n+ converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)\n+ assert isinstance(converted_args[\"model_name_or_path\"], str)\n+ assert isinstance(converted_args[\"do_train\"], bool)\n+ assert isinstance(converted_args[\"epochs\"], int)\n+ assert isinstance(converted_args[\"learning_rate\"], float)\n+ assert isinstance(converted_args[\"max_steps\"], float)\n+\n+ with pytest.raises(ValueError):\n+ _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)\n", "code_comments": [ { "body": "```suggestion\r\n## Getting Started\r\n```", "diff_hunk": "@@ -0,0 +1,134 @@\n+<!---\n+Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# Amazon SageMaker\n+\n+Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+---\n+\n+## Getting Started: Train a πŸ€— Transformers Model", "from_author": false }, { "body": "```suggestion\r\nπŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. πŸ€— Accelerate is not in the DLC yet (swill soon be added!) so to use it within Amazon SageMaker you need to create a `requirements.txt` in the same directory where your training script is located and add it as dependency.\r\n```", "diff_hunk": "@@ -0,0 +1,134 @@\n+<!---\n+Copyright 2021 The HuggingFace Team. 
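As a quick illustration of the type inference exercised by this test, here is a self-contained copy of the `_infer_type` helper from the launch diff, applied to a few of the hyperparameter strings above (the printed types match the test's assertions):

```python
def _infer_type(s):
    # Mirrors the helper in the launch diff: try to read the string as a number,
    # keep it as an int when it has no fractional part, otherwise leave it unchanged.
    try:
        s = float(s)
        if s // 1 == s:
            return int(s)
        return s
    except ValueError:
        return s


for raw in ["bert", "3", "5e-5", "50.5"]:
    print(raw, type(_infer_type(raw)).__name__)
```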
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# Amazon SageMaker\n+\n+Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+---\n+\n+## Getting Started: Train a πŸ€— Transformers Model\n+\n+### Setup & Installation\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html). \n+\n+After you have your AWS Account you need to install the `sagemaker` sdk for πŸ€— Accelerate with. \n+\n+```python\n+pip install \"accelerate[sagemaker]\" --upgrade\n+```\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. To be able to use πŸ€— Accelerate within Amazon SageMaker you need to create a `requirements.txt` in the same directory where your training script is located and add it as dependency.", "from_author": false }, { "body": "```suggestion\r\nYou should also add any other dependencies you have to this `requirements.txt`. \r\n```", "diff_hunk": "@@ -0,0 +1,134 @@\n+<!---\n+Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# Amazon SageMaker\n+\n+Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+---\n+\n+## Getting Started: Train a πŸ€— Transformers Model\n+\n+### Setup & Installation\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html). 
\n+\n+After you have your AWS Account you need to install the `sagemaker` sdk for πŸ€— Accelerate with. \n+\n+```python\n+pip install \"accelerate[sagemaker]\" --upgrade\n+```\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. To be able to use πŸ€— Accelerate within Amazon SageMaker you need to create a `requirements.txt` in the same directory where your training script is located and add it as dependency.\n+\n+```python\n+accelerate\n+```\n+\n+You can also add other dependencies to this `requirements.txt`. ", "from_author": false }, { "body": "What do you mean here? Shouldn't it be in the example?", "diff_hunk": "@@ -0,0 +1,134 @@\n+<!---\n+Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# Amazon SageMaker\n+\n+Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+---\n+\n+## Getting Started: Train a πŸ€— Transformers Model\n+\n+### Setup & Installation\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html). \n+\n+After you have your AWS Account you need to install the `sagemaker` sdk for πŸ€— Accelerate with. \n+\n+```python\n+pip install \"accelerate[sagemaker]\" --upgrade\n+```\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. To be able to use πŸ€— Accelerate within Amazon SageMaker you need to create a `requirements.txt` in the same directory where your training script is located and add it as dependency.\n+\n+```python\n+accelerate\n+```\n+\n+You can also add other dependencies to this `requirements.txt`. \n+\n+\n+### Configure πŸ€— Accelerate \n+\n+You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with the πŸ€— Accelerate CLI. \n+\n+```bash\n+accelerate config\n+# In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1\n+```\n+\n+πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit. \n+\n+_**Note**: πŸ€— Accelerate is not saving any of your credentials._\n+\n+\n+### Prepare a πŸ€— Accelerate fine-tuning script.\n+\n+The training script is very similar to a training script you might run outside of SageMaker, but to save your model after training you need to specify either `/opt/ml/model` or use `os.environ[\"SM_MODEL_DIR\"]` as your save directory. 
After training, artifacts in this directory are uploaded to S3.\n+\n+```diff\n+- torch.save()\n++ accelerator.save('/opt/ml/model')\n+```\n+\n+### Launch Training\n+\n+You can launch your training with πŸ€— Accelerate CLI with\n+\n+```bash\n+accelerate launch path_to_script.py --args_to_the_script\n+```\n+\n+This will launch your training script using your configuration. The only thing you have to do is provide all the arguments needed by your training script as named arguments.\n+\n+**Examples**\n+\n+_**Note**: If you run one of the example scripts, don't for get to add `accelerator.save('/opt/ml/model')`to it._", "from_author": false }, { "body": "I see you did something for those later on, is it still necessary? We should make sure we support those as they are very common, or if they are not, it should be made clear in the docs.", "diff_hunk": "@@ -162,8 +162,8 @@ def collate_fn(examples):\n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n- parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\"--fp16\", type=bool, default=False, help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\"--cpu\", type=bool, default=False, help=\"If passed, will train on the CPU.\")", "from_author": false }, { "body": "Should we amke one folder for each example? The CV example would require more.", "diff_hunk": "@@ -0,0 +1 @@\n+accelerate # used to be installed in Amazon SageMaker environment", "from_author": false }, { "body": "There should be something for `store_false` too.", "diff_hunk": "@@ -168,11 +184,100 @@ def tpu_launcher(args):\n xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n \n \n-def sagemaker_launcher(sagemaker_config, args):\n- raise NotImplementedError(\n- \"Support for starting SageMaker training is not yet implemented. But you can create configs for it.\"\n+def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\n+ if len(nargs) < 0:\n+ return {}\n+ # helper function to infer type for argsparser\n+\n+ def _infer_type(s):\n+ try:\n+ s = float(s)\n+\n+ if s // 1 == s:\n+ return int(s)\n+ return s\n+ except ValueError:\n+ return s\n+\n+ parser = argparse.ArgumentParser()\n+ _, unknown = parser.parse_known_args(nargs)\n+ for index, argument in enumerate(unknown):\n+ if argument.startswith((\"-\", \"--\")):\n+ action = None\n+ if index + 1 < len(unknown): # checks if next index would be in list\n+ if unknown[index + 1].startswith((\"-\", \"--\")): # checks if next element is an key\n+ action = \"store_true\"\n+ else: # action_true parameter is last element\n+ action = \"store_true\"", "from_author": false }, { "body": "The current example doesn't show how to save models at all. So that's why I added it here.", "diff_hunk": "@@ -0,0 +1,134 @@\n+<!---\n+Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+\n+Licensed under the Apache License, Version 2.0 (the \"License\");\n+you may not use this file except in compliance with the License.\n+You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+-->\n+\n+# Amazon SageMaker\n+\n+Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+---\n+\n+## Getting Started: Train a πŸ€— Transformers Model\n+\n+### Setup & Installation\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html). \n+\n+After you have your AWS Account you need to install the `sagemaker` sdk for πŸ€— Accelerate with. \n+\n+```python\n+pip install \"accelerate[sagemaker]\" --upgrade\n+```\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. To be able to use πŸ€— Accelerate within Amazon SageMaker you need to create a `requirements.txt` in the same directory where your training script is located and add it as dependency.\n+\n+```python\n+accelerate\n+```\n+\n+You can also add other dependencies to this `requirements.txt`. \n+\n+\n+### Configure πŸ€— Accelerate \n+\n+You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with the πŸ€— Accelerate CLI. \n+\n+```bash\n+accelerate config\n+# In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1\n+```\n+\n+πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit. \n+\n+_**Note**: πŸ€— Accelerate is not saving any of your credentials._\n+\n+\n+### Prepare a πŸ€— Accelerate fine-tuning script.\n+\n+The training script is very similar to a training script you might run outside of SageMaker, but to save your model after training you need to specify either `/opt/ml/model` or use `os.environ[\"SM_MODEL_DIR\"]` as your save directory. After training, artifacts in this directory are uploaded to S3.\n+\n+```diff\n+- torch.save()\n++ accelerator.save('/opt/ml/model')\n+```\n+\n+### Launch Training\n+\n+You can launch your training with πŸ€— Accelerate CLI with\n+\n+```bash\n+accelerate launch path_to_script.py --args_to_the_script\n+```\n+\n+This will launch your training script using your configuration. The only thing you have to do is provide all the arguments needed by your training script as named arguments.\n+\n+**Examples**\n+\n+_**Note**: If you run one of the example scripts, don't for get to add `accelerator.save('/opt/ml/model')`to it._", "from_author": true }, { "body": "SageMaker cannot pass `store_true` or `store_false` to a script executed. 
\r\n\r\n> Note that SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to specify type as bool in your script and provide an explicit True or False value for this hyperparameter when instantiating PyTorch Estimator.\r\n\r\nhttps://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script\r\n\r\nshould we add it to the documentation then? ", "diff_hunk": "@@ -162,8 +162,8 @@ def collate_fn(examples):\n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n- parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\"--fp16\", type=bool, default=False, help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\"--cpu\", type=bool, default=False, help=\"If passed, will train on the CPU.\")", "from_author": true }, { "body": "for the CV example to work we would only need to add `timm`. `torchvision` should be inside the DLC since it uses the AWS PyTorch DLC as base\r\n", "diff_hunk": "@@ -0,0 +1 @@\n+accelerate # used to be installed in Amazon SageMaker environment", "from_author": true }, { "body": "puh. I that even possible? like how can someone cloud tell the difference between `store_true` and `store_false` without knowing the script... ", "diff_hunk": "@@ -168,11 +184,100 @@ def tpu_launcher(args):\n xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n \n \n-def sagemaker_launcher(sagemaker_config, args):\n- raise NotImplementedError(\n- \"Support for starting SageMaker training is not yet implemented. But you can create configs for it.\"\n+def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\n+ if len(nargs) < 0:\n+ return {}\n+ # helper function to infer type for argsparser\n+\n+ def _infer_type(s):\n+ try:\n+ s = float(s)\n+\n+ if s // 1 == s:\n+ return int(s)\n+ return s\n+ except ValueError:\n+ return s\n+\n+ parser = argparse.ArgumentParser()\n+ _, unknown = parser.parse_known_args(nargs)\n+ for index, argument in enumerate(unknown):\n+ if argument.startswith((\"-\", \"--\")):\n+ action = None\n+ if index + 1 < len(unknown): # checks if next index would be in list\n+ if unknown[index + 1].startswith((\"-\", \"--\")): # checks if next element is an key\n+ action = \"store_true\"\n+ else: # action_true parameter is last element\n+ action = \"store_true\"", "from_author": true }, { "body": "Yes, it should be properly documented.", "diff_hunk": "@@ -162,8 +162,8 @@ def collate_fn(examples):\n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n- parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\"--fp16\", type=bool, default=False, help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\"--cpu\", type=bool, default=False, help=\"If passed, will train on the CPU.\")", "from_author": false }, { "body": "I have no idea what you're doing it here then. 
Also I though the script could not have `--store_true`/`--store_false`\r\n\r\nOn top of the documentation, we should add a nice error message here if the parser has arguments with `--store_true`/`--store_false`.", "diff_hunk": "@@ -168,11 +184,100 @@ def tpu_launcher(args):\n xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n \n \n-def sagemaker_launcher(sagemaker_config, args):\n- raise NotImplementedError(\n- \"Support for starting SageMaker training is not yet implemented. But you can create configs for it.\"\n+def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\n+ if len(nargs) < 0:\n+ return {}\n+ # helper function to infer type for argsparser\n+\n+ def _infer_type(s):\n+ try:\n+ s = float(s)\n+\n+ if s // 1 == s:\n+ return int(s)\n+ return s\n+ except ValueError:\n+ return s\n+\n+ parser = argparse.ArgumentParser()\n+ _, unknown = parser.parse_known_args(nargs)\n+ for index, argument in enumerate(unknown):\n+ if argument.startswith((\"-\", \"--\")):\n+ action = None\n+ if index + 1 < len(unknown): # checks if next index would be in list\n+ if unknown[index + 1].startswith((\"-\", \"--\")): # checks if next element is an key\n+ action = \"store_true\"\n+ else: # action_true parameter is last element\n+ action = \"store_true\"", "from_author": false }, { "body": "I reworked the documentation to use `.rst` to highlight it as a warning. ([see PR comment](https://github.com/huggingface/accelerate/pull/26#issuecomment-818525901))", "diff_hunk": "@@ -162,8 +162,8 @@ def collate_fn(examples):\n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n- parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ parser.add_argument(\"--fp16\", type=bool, default=False, help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\"--cpu\", type=bool, default=False, help=\"If passed, will train on the CPU.\")", "from_author": true }, { "body": "For SageMaker to work I needed to convert the `args.training_script_args` from `--foo bar --john 2 --store_true --another key` to\r\n ```python\r\n{\r\n\"foo\":\"bar\",\r\n\"john\": 2,\r\n\"store_true\": True,\r\n\"another\": \"key\"\r\n}\r\n```\r\nTo achieve this I built this dynamic args parser, which takes the `args.training_script_args` and creates an `argsparser` out of it to apply afterward on `args.training_script_args` to create this `dict`. \r\n\r\nWhile creating this I haven't thought of `store_false`, since I never have seen it in real use, that's why I expected that there is only `store_true` and implemented it like that. \r\n", "diff_hunk": "@@ -168,11 +184,100 @@ def tpu_launcher(args):\n xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n \n \n-def sagemaker_launcher(sagemaker_config, args):\n- raise NotImplementedError(\n- \"Support for starting SageMaker training is not yet implemented. 
But you can create configs for it.\"\n+def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\n+ if len(nargs) < 0:\n+ return {}\n+ # helper function to infer type for argsparser\n+\n+ def _infer_type(s):\n+ try:\n+ s = float(s)\n+\n+ if s // 1 == s:\n+ return int(s)\n+ return s\n+ except ValueError:\n+ return s\n+\n+ parser = argparse.ArgumentParser()\n+ _, unknown = parser.parse_known_args(nargs)\n+ for index, argument in enumerate(unknown):\n+ if argument.startswith((\"-\", \"--\")):\n+ action = None\n+ if index + 1 < len(unknown): # checks if next index would be in list\n+ if unknown[index + 1].startswith((\"-\", \"--\")): # checks if next element is an key\n+ action = \"store_true\"\n+ else: # action_true parameter is last element\n+ action = \"store_true\"", "from_author": true }, { "body": "Should we add `timm` to the `requirements.txt` to make both examples runnable out of the box?", "diff_hunk": "@@ -0,0 +1 @@\n+accelerate # used to be installed in Amazon SageMaker environment", "from_author": true }, { "body": "As discussed I adjusted the convert script to raise an error if someone uses `store` actions and added a case into the test. ", "diff_hunk": "@@ -168,11 +184,100 @@ def tpu_launcher(args):\n xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n \n \n-def sagemaker_launcher(sagemaker_config, args):\n- raise NotImplementedError(\n- \"Support for starting SageMaker training is not yet implemented. But you can create configs for it.\"\n+def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\n+ if len(nargs) < 0:\n+ return {}\n+ # helper function to infer type for argsparser\n+\n+ def _infer_type(s):\n+ try:\n+ s = float(s)\n+\n+ if s // 1 == s:\n+ return int(s)\n+ return s\n+ except ValueError:\n+ return s\n+\n+ parser = argparse.ArgumentParser()\n+ _, unknown = parser.parse_known_args(nargs)\n+ for index, argument in enumerate(unknown):\n+ if argument.startswith((\"-\", \"--\")):\n+ action = None\n+ if index + 1 < len(unknown): # checks if next index would be in list\n+ if unknown[index + 1].startswith((\"-\", \"--\")): # checks if next element is an key\n+ action = \"store_true\"\n+ else: # action_true parameter is last element\n+ action = \"store_true\"", "from_author": true }, { "body": "```suggestion\r\nhave an AWS account yet learn more `here <https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html>`__.\r\n```", "diff_hunk": "@@ -0,0 +1,169 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+Amazon SageMaker\n+=======================================================================================================================\n+\n+Hugging Face and Amazon introduced new `Hugging Face Deep Learning Containers (DLCs)\n+<https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers>`_ to\n+make it easier than ever to train Hugging Face Transformer models in `Amazon SageMaker\n+<https://aws.amazon.com/sagemaker/>`_.\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+\n+\n+Getting Started\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+Setup & Installation\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not\n+have an AWS account yet learn more `here <https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html>`_.", "from_author": false }, { "body": "```suggestion\r\n <https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script>`__.\r\n```", "diff_hunk": "@@ -0,0 +1,169 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+Amazon SageMaker\n+=======================================================================================================================\n+\n+Hugging Face and Amazon introduced new `Hugging Face Deep Learning Containers (DLCs)\n+<https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers>`_ to\n+make it easier than ever to train Hugging Face Transformer models in `Amazon SageMaker\n+<https://aws.amazon.com/sagemaker/>`_.\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+\n+\n+Getting Started\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+Setup & Installation\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not\n+have an AWS account yet learn more `here <https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html>`_.\n+\n+After you have your AWS Account you need to install the ``sagemaker`` sdk for πŸ€— Accelerate with.\n+\n+.. code-block:: \n+\n+ pip install \"accelerate[sagemaker]\" --upgrade\n+\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with ``transformers``, ``datasets`` and ``tokenizers`` pre-installed. 
πŸ€—\n+Accelerate is not in the DLC yet (swill soon be added!) so to use it within Amazon SageMaker you need to create a\n+``requirements.txt`` in the same directory where your training script is located and add it as dependency.\n+\n+.. code-block:: \n+\n+ accelerate\n+\n+You should also add any other dependencies you have to this ``requirements.txt``.\n+\n+\n+Configure πŸ€— Accelerate\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with\n+the πŸ€— Accelerate CLI.\n+\n+.. code-block:: \n+\n+ accelerate config\n+ # In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1\n+\n+\n+πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.\n+\n+.. note::\n+ πŸ€— Accelerate is not saving any of your credentials.\n+\n+\n+Prepare a πŸ€— Accelerate fine-tuning script\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+The training script is very similar to a training script you might run outside of SageMaker, but to save your model\n+after training you need to specify either ``/opt/ml/model`` or use ``os.environ[\"SM_MODEL_DIR\"]`` as your save\n+directory. After training, artifacts in this directory are uploaded to S3.\n+\n+\n+.. code-block:: diff\n+\n+ - torch.save()\n+ + accelerator.save('/opt/ml/model')\n+\n+\n+.. warning::\n+ SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to\n+ specify type as bool in your script and provide an explicit True or False value for this hyperparameter. `[REF]\n+ <https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script>`_", "from_author": false }, { "body": "fp16 might not be accepted by the script so it should not be added like this. It should be added in the env instead, in the variable `\"USE_FP16\"` (which is what accelerate will look for).", "diff_hunk": "@@ -168,11 +184,105 @@ def tpu_launcher(args):\n xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n \n \n-def sagemaker_launcher(sagemaker_config, args):\n- raise NotImplementedError(\n- \"Support for starting SageMaker training is not yet implemented. But you can create configs for it.\"\n+def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:\n+ if len(nargs) < 0:\n+ return {}\n+ # helper function to infer type for argsparser\n+\n+ def _infer_type(s):\n+ try:\n+ s = float(s)\n+\n+ if s // 1 == s:\n+ return int(s)\n+ return s\n+ except ValueError:\n+ return s\n+\n+ parser = argparse.ArgumentParser()\n+ _, unknown = parser.parse_known_args(nargs)\n+ for index, argument in enumerate(unknown):\n+ if argument.startswith((\"-\", \"--\")):\n+ action = None\n+ if index + 1 < len(unknown): # checks if next index would be in list\n+ if unknown[index + 1].startswith((\"-\", \"--\")): # checks if next element is an key\n+ # raise an error if element is store_true or store_false\n+ raise ValueError(\n+ \"SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types\"\n+ )\n+ else: # raise an error if last element is store_true or store_false\n+ raise ValueError(\n+ \"SageMaker doesn’t support argparse actions for `store_true` or `store_false`. 
Please define explicit types\"\n+ )\n+ # adds argument to parser based on action_store true\n+ if action is None:\n+ parser.add_argument(argument, type=_infer_type)\n+ else:\n+ parser.add_argument(argument, action=action)\n+\n+ return {\n+ key: (literal_eval(value) if value == \"True\" or value == \"False\" else value)\n+ for key, value in parser.parse_args(nargs).__dict__.items()\n+ }\n+\n+\n+def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n+ if not is_sagemaker_available():\n+ raise ImportError(\n+ \"Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`\"\n+ )\n+ from sagemaker.huggingface import HuggingFace\n+\n+ # configure environment\n+ print(\"Configuring Amazon SageMaker environment\")\n+ os.environ[\"AWS_DEFAULT_REGION\"] = sagemaker_config.region\n+\n+ # configure credentials\n+ if sagemaker_config.profile is not None:\n+ os.environ[\"AWS_PROFILE\"] = sagemaker_config.profile\n+ elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None:\n+ os.environ[\"AWS_ACCESS_KEY_ID\"] = args.aws_access_key_id\n+ os.environ[\"AWS_SECRET_ACCESS_KEY\"] = args.aws_secret_access_key\n+ else:\n+ raise EnvironmentError(\n+ \"You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile\"\n+ )\n+\n+ # extract needed arguments\n+ source_dir = os.path.dirname(args.training_script)\n+ if not source_dir: # checks if string is empty\n+ source_dir = \".\"\n+ entry_point = os.path.basename(args.training_script)\n+ if not entry_point.endswith(\".py\"):\n+ raise ValueError(f'Your training script should be a python script and not \"{entry_point}\"')\n+\n+ print(\"Converting Arguments to Hyperparameters\")\n+ converted_hyperparameters = _convert_nargs_to_dict(args.training_script_args)\n+ hyperparameters = {\"fp16\": args.fp16, **converted_hyperparameters}", "from_author": false }, { "body": "```suggestion\r\nAccelerate is not in the DLC yet (will soon be added!) so to use it within Amazon SageMaker you need to create a\r\n```", "diff_hunk": "@@ -0,0 +1,169 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+Amazon SageMaker\n+=======================================================================================================================\n+\n+Hugging Face and Amazon introduced new `Hugging Face Deep Learning Containers (DLCs)\n+<https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers>`_ to\n+make it easier than ever to train Hugging Face Transformer models in `Amazon SageMaker\n+<https://aws.amazon.com/sagemaker/>`_.\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+\n+\n+Getting Started\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+Setup & Installation\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not\n+have an AWS account yet learn more `here <https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html>`__.\n+\n+After you have your AWS Account you need to install the ``sagemaker`` sdk for πŸ€— Accelerate with.\n+\n+.. code-block:: \n+\n+ pip install \"accelerate[sagemaker]\" --upgrade\n+\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with ``transformers``, ``datasets`` and ``tokenizers`` pre-installed. πŸ€—\n+Accelerate is not in the DLC yet (swill soon be added!) so to use it within Amazon SageMaker you need to create a", "from_author": false }, { "body": "I would also add the `/opt/ml/model` here for consistency\r\n```suggestion\r\n - torch.save('/opt/ml/model`)\r\n```", "diff_hunk": "@@ -0,0 +1,169 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+Amazon SageMaker\n+=======================================================================================================================\n+\n+Hugging Face and Amazon introduced new `Hugging Face Deep Learning Containers (DLCs)\n+<https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers>`_ to\n+make it easier than ever to train Hugging Face Transformer models in `Amazon SageMaker\n+<https://aws.amazon.com/sagemaker/>`_.\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+\n+\n+Getting Started\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+Setup & Installation\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. 
If you do not\n+have an AWS account yet learn more `here <https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html>`__.\n+\n+After you have your AWS Account you need to install the ``sagemaker`` sdk for πŸ€— Accelerate with.\n+\n+.. code-block:: \n+\n+ pip install \"accelerate[sagemaker]\" --upgrade\n+\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with ``transformers``, ``datasets`` and ``tokenizers`` pre-installed. πŸ€—\n+Accelerate is not in the DLC yet (swill soon be added!) so to use it within Amazon SageMaker you need to create a\n+``requirements.txt`` in the same directory where your training script is located and add it as dependency.\n+\n+.. code-block:: \n+\n+ accelerate\n+\n+You should also add any other dependencies you have to this ``requirements.txt``.\n+\n+\n+Configure πŸ€— Accelerate\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with\n+the πŸ€— Accelerate CLI.\n+\n+.. code-block:: \n+\n+ accelerate config\n+ # In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1\n+\n+\n+πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.\n+\n+.. note::\n+ πŸ€— Accelerate is not saving any of your credentials.\n+\n+\n+Prepare a πŸ€— Accelerate fine-tuning script\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+The training script is very similar to a training script you might run outside of SageMaker, but to save your model\n+after training you need to specify either ``/opt/ml/model`` or use ``os.environ[\"SM_MODEL_DIR\"]`` as your save\n+directory. After training, artifacts in this directory are uploaded to S3.\n+\n+\n+.. code-block:: diff\n+\n+ - torch.save()", "from_author": false }, { "body": "```suggestion\r\n If you run one of the example scripts, don't forget to add ``accelerator.save('/opt/ml/model')`` to it.\r\n```", "diff_hunk": "@@ -0,0 +1,169 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+Amazon SageMaker\n+=======================================================================================================================\n+\n+Hugging Face and Amazon introduced new `Hugging Face Deep Learning Containers (DLCs)\n+<https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers>`_ to\n+make it easier than ever to train Hugging Face Transformer models in `Amazon SageMaker\n+<https://aws.amazon.com/sagemaker/>`_.\n+\n+To learn how to use the new πŸ€— DLCs with the Amazon SageMaker to run your πŸ€— Accelerate scripts and raw training loops.0\n+\n+\n+\n+Getting Started\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+Setup & Installation\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+\n+Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not\n+have an AWS account yet learn more `here <https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html>`__.\n+\n+After you have your AWS Account you need to install the ``sagemaker`` sdk for πŸ€— Accelerate with.\n+\n+.. code-block:: \n+\n+ pip install \"accelerate[sagemaker]\" --upgrade\n+\n+\n+πŸ€— Accelerate currently uses the πŸ€— DLCs, with ``transformers``, ``datasets`` and ``tokenizers`` pre-installed. πŸ€—\n+Accelerate is not in the DLC yet (swill soon be added!) so to use it within Amazon SageMaker you need to create a\n+``requirements.txt`` in the same directory where your training script is located and add it as dependency.\n+\n+.. code-block:: \n+\n+ accelerate\n+\n+You should also add any other dependencies you have to this ``requirements.txt``.\n+\n+\n+Configure πŸ€— Accelerate\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with\n+the πŸ€— Accelerate CLI.\n+\n+.. code-block:: \n+\n+ accelerate config\n+ # In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1\n+\n+\n+πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.\n+\n+.. note::\n+ πŸ€— Accelerate is not saving any of your credentials.\n+\n+\n+Prepare a πŸ€— Accelerate fine-tuning script\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+The training script is very similar to a training script you might run outside of SageMaker, but to save your model\n+after training you need to specify either ``/opt/ml/model`` or use ``os.environ[\"SM_MODEL_DIR\"]`` as your save\n+directory. After training, artifacts in this directory are uploaded to S3.\n+\n+\n+.. code-block:: diff\n+\n+ - torch.save()\n+ + accelerator.save('/opt/ml/model')\n+\n+\n+.. warning::\n+ SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to\n+ specify type as bool in your script and provide an explicit True or False value for this hyperparameter. 
`[REF]\n+ <https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script>`__.\n+\n+\n+Launch Training\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+You can launch your training with πŸ€— Accelerate CLI with\n+\n+.. code-block:: \n+\n+ accelerate launch path_to_script.py --args_to_the_script\n+\n+\n+This will launch your training script using your configuration. The only thing you have to do is provide all the\n+arguments needed by your training script as named arguments.\n+\n+**Examples**\n+\n+.. note::\n+ If you run one of the example scripts, don't for get to add ``accelerator.save('/opt/ml/model')`` to it.", "from_author": false } ], "context": [ { "body": "![screencapture-127-0-0-1-5500-docs-build-html-sagemaker-html-2021-04-13-09_51_47](https://user-images.githubusercontent.com/32632186/114516657-e68efa00-9c3d-11eb-9706-b5c76d21cbc0.png)\r\n", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/26", "pr_id": 612316809 }, { "diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\nindex 7d6210327..016feb77f 100644\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -14,17 +14,18 @@\n #\n import os\n import sys\n-sys.path.insert(0, os.path.abspath('../../src'))\n+\n+sys.path.insert(0, os.path.abspath(\"../../src\"))\n \n \n # -- Project information -----------------------------------------------------\n \n-project = u'accelerate'\n-copyright = u'2020, The Hugging Face Team, Licenced under the Apache License, Version 2.0'\n-author = u'huggingface'\n+project = \"accelerate\"\n+copyright = \"2020, The Hugging Face Team, Licenced under the Apache License, Version 2.0\"\n+author = \"huggingface\"\n \n # The short X.Y version\n-version = u'0.1.0'\n+version = \"0.1.0\"\n \n # -- General configuration ---------------------------------------------------\n \n@@ -36,27 +37,28 @@\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n # ones.\n extensions = [\n- 'sphinx.ext.autodoc',\n- 'sphinx.ext.extlinks',\n- 'sphinx.ext.coverage',\n- 'sphinx.ext.napoleon',\n- 'recommonmark',\n- 'sphinx.ext.viewcode',\n- 'sphinx_markdown_tables',\n- 'sphinx_copybutton'\n+ \"sphinx.ext.autodoc\",\n+ \"sphinx.ext.extlinks\",\n+ \"sphinx.ext.coverage\",\n+ \"sphinx.ext.napoleon\",\n+ \"recommonmark\",\n+ \"sphinx.ext.viewcode\",\n+ \"sphinx_markdown_tables\",\n+ \"sphinx_copybutton\",\n+ \"sphinxext.opengraph\",\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n-templates_path = ['_templates']\n+templates_path = [\"_templates\"]\n \n # The suffix(es) of source filenames.\n # You can specify multiple suffix as a list of string:\n #\n-source_suffix = ['.rst', '.md']\n+source_suffix = [\".rst\", \".md\"]\n # source_suffix = '.rst'\n \n # The master toctree document.\n-master_doc = 'index'\n+master_doc = \"index\"\n \n # The language for content autogenerated by Sphinx. 
Refer to documentation\n # for a list of supported languages.\n@@ -68,7 +70,7 @@\n # List of patterns, relative to source directory, that match files and\n # directories to ignore when looking for source files.\n # This pattern also affects html_static_path and html_extra_path.\n-exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']\n+exclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n \n # The name of the Pygments (syntax highlighting) style to use.\n pygments_style = None\n@@ -82,20 +84,31 @@\n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n #\n-html_theme = 'sphinx_rtd_theme'\n+html_theme = \"sphinx_rtd_theme\"\n \n # Theme options are theme-specific and customize the look and feel of a theme\n # further. For a list of options available for each theme, see the\n # documentation.\n #\n-html_theme_options = {\n- 'analytics_id': 'UA-83738774-2'\n-}\n+html_theme_options = {\"analytics_id\": \"UA-83738774-2\"}\n+\n+# Configuration for OpenGraph and Twitter Card Tags.\n+# These are responsible for creating nice shareable social images https://ahrefs.com/blog/open-graph-meta-tags/\n+# https://ogp.me/#type_website\n+ogp_image = \"https://huggingface.co/front/thumbnails/docs/accelerate.png\"\n+ogp_description = \"Run your raw PyTorch training script on any kind of device. πŸ€— Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed setting (multi-GPUs, TPUs etc.)\"\n+ogp_description_length = 160\n+\n+ogp_custom_meta_tags = [\n+ f'<meta name=\"twitter:image\" content=\"{ogp_image}\">',\n+ f'<meta name=\"twitter:description\" content=\"{ogp_description}\">',\n+]\n+\n \n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n-html_static_path = ['_static']\n+html_static_path = [\"_static\"]\n \n # Custom sidebar templates, must be a dictionary that maps document names\n # to template names.\n@@ -107,17 +120,17 @@\n #\n # html_sidebars = {}\n \n-# This must be the name of an image file (path relative to the configuration \n-# directory) that is the favicon of the docs. Modern browsers use this as \n-# the icon for tabs, windows and bookmarks. It should be a Windows-style \n+# This must be the name of an image file (path relative to the configuration\n+# directory) that is the favicon of the docs. Modern browsers use this as\n+# the icon for tabs, windows and bookmarks. 
It should be a Windows-style\n # icon file (.ico).\n-html_favicon = 'favicon.ico'\n+html_favicon = \"favicon.ico\"\n \n \n # -- Options for HTMLHelp output ---------------------------------------------\n \n # Output file base name for HTML help builder.\n-htmlhelp_basename = 'acceleratedoc'\n+htmlhelp_basename = \"acceleratedoc\"\n \n \n # -- Options for LaTeX output ------------------------------------------------\n@@ -126,15 +139,12 @@\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n-\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n-\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n-\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n@@ -144,8 +154,7 @@\n # (source start file, target name, title,\n # author, documentclass [howto, manual, or own class]).\n latex_documents = [\n- (master_doc, 'accelerate.tex', u'accelerate Documentation',\n- u'huggingface', 'manual'),\n+ (master_doc, \"accelerate.tex\", \"accelerate Documentation\", \"huggingface\", \"manual\"),\n ]\n \n \n@@ -153,10 +162,7 @@\n \n # One entry per manual page. List of tuples\n # (source start file, name, description, authors, manual section).\n-man_pages = [\n- (master_doc, 'accelerate', u'accelerate Documentation',\n- [author], 1)\n-]\n+man_pages = [(master_doc, \"accelerate\", \"accelerate Documentation\", [author], 1)]\n \n \n # -- Options for Texinfo output ----------------------------------------------\n@@ -165,9 +171,15 @@\n # (source start file, target name, title, author,\n # dir menu entry, description, category)\n texinfo_documents = [\n- (master_doc, 'accelerate', u'accelerate Documentation',\n- author, 'accelerate', 'One line description of project.',\n- 'Miscellaneous'),\n+ (\n+ master_doc,\n+ \"accelerate\",\n+ \"accelerate Documentation\",\n+ author,\n+ \"accelerate\",\n+ \"One line description of project.\",\n+ \"Miscellaneous\",\n+ ),\n ]\n \n \n@@ -186,11 +198,13 @@\n # epub_uid = ''\n \n # A list of files that should not be packed into the epub file.\n-epub_exclude_files = ['search.html']\n+epub_exclude_files = [\"search.html\"]\n+\n \n def setup(app):\n- app.add_css_file('css/huggingface.css')\n- app.add_css_file('css/code-snippets.css')\n- app.add_js_file('js/custom.js')\n+ app.add_css_file(\"css/huggingface.css\")\n+ app.add_css_file(\"css/code-snippets.css\")\n+ app.add_js_file(\"js/custom.js\")\n+\n \n # -- Extension configuration -------------------------------------------------\ndiff --git a/setup.py b/setup.py\nindex b6722e124..e2ce2cfbc 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -24,6 +24,7 @@\n \"sphinx-markdown-tables\",\n \"sphinx-rtd-theme==0.4.3\",\n \"sphinx-copybutton\",\n+ \"sphinxext-opengraph==0.4.1\",\n ]\n extras[\"test\"] = [\n \"pytest\",\n", "code_comments": [], "context": [ { "body": "FYI the moon-landing PR has been merged and deployed :)", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/25", "pr_id": 610386427 }, { "diff": "diff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nindex 5b0dd00ba..fe2cb122a 100644\n--- a/src/accelerate/commands/config/config_args.py\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -42,13 +42,13 @@ def load_config_from_file(config_file):\n config_file = config_file if config_file is not None else default_config_file\n with open(config_file, \"r\", encoding=\"utf-8\") as f:\n if config_file.endswith(\".json\"):\n- if 
json.load(f)[\"compute_environment\"] == ComputeEnvironment.LOCAL_MACHINE:\n+ if json.load(f).get(\"compute_environment\", ComputeEnvironment.LOCAL_MACHINE):\n config_class = ClusterConfig\n else:\n config_class = SageMakerConfig\n return config_class.from_json_file(json_file=config_file)\n else:\n- if yaml.safe_load(f)[\"compute_environment\"] == ComputeEnvironment.LOCAL_MACHINE:\n+ if yaml.safe_load(f).get(\"compute_environment\", ComputeEnvironment.LOCAL_MACHINE):\n config_class = ClusterConfig\n else:\n config_class = SageMakerConfig\n@@ -73,7 +73,10 @@ def to_dict(self):\n def from_json_file(cls, json_file=None):\n json_file = default_json_config_file if json_file is None else json_file\n with open(json_file, \"r\", encoding=\"utf-8\") as f:\n- return cls(**json.load(f))\n+ config_dict = json.load(f)\n+ if \"compute_environment\" not in config_dict:\n+ config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n+ return cls(**config_dict)\n \n def to_json_file(self, json_file):\n with open(json_file, \"w\", encoding=\"utf-8\") as f:\n@@ -84,7 +87,10 @@ def to_json_file(self, json_file):\n def from_yaml_file(cls, yaml_file=None):\n yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n- return cls(**yaml.safe_load(f))\n+ config_dict = yaml.safe_load(f)\n+ if \"compute_environment\" not in config_dict:\n+ config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n+ return cls(**config_dict)\n \n def to_yaml_file(self, yaml_file):\n with open(yaml_file, \"w\", encoding=\"utf-8\") as f:\n", "code_comments": [], "context": [ { "body": "No problem @philschmid, I should have caught it during the review :-) ", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/23", "pr_id": 604294677 }, { "diff": "diff --git a/README.md b/README.md\nindex 68d6866b5..77c6b6ebf 100644\n--- a/README.md\n+++ b/README.md\n@@ -53,38 +53,38 @@ limitations under the License.\n Here is an example:\n \n ```diff\n-import torch\n-import torch.nn.functional as F\n-from datasets import load_dataset\n+ import torch\n+ import torch.nn.functional as F\n+ from datasets import load_dataset\n \n + from accelerate import Accelerator\n + accelerator = Accelerator()\n - device = 'cpu'\n + device = accelerator.device\n \n-model = torch.nn.Transformer().to(device)\n-optim = torch.optim.Adam(model.parameters())\n+ model = torch.nn.Transformer().to(device)\n+ optim = torch.optim.Adam(model.parameters())\n \n-dataset = load_dataset('my_dataset')\n-data = torch.utils.data.Dataloader(dataset)\n+ dataset = load_dataset('my_dataset')\n+ data = torch.utils.data.Dataloader(dataset)\n \n + model, optim, data = accelerator.prepare(model, optim, data)\n \n-model.train()\n-for epoch in range(10):\n- for source, targets in data:\n- source = source.to(device)\n- targets = targets.to(device)\n+ model.train()\n+ for epoch in range(10):\n+ for source, targets in data:\n+ source = source.to(device)\n+ targets = targets.to(device)\n \n- optimizer.zero_grad()\n+ optimizer.zero_grad()\n \n- output = model(source, targets)\n- loss = F.cross_entropy(output, targets)\n+ output = model(source, targets)\n+ loss = F.cross_entropy(output, targets)\n \n + accelerator.backward(loss)\n - loss.backward()\n \n- optimizer.step()\n+ optimizer.step()\n ```\n \n As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, 
single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp16).\n@@ -94,9 +94,9 @@ In particular, the same code can then be run without modification on your local\n πŸ€— Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can even simplify your training loop further:\n \n ```diff\n-import torch\n-import torch.nn.functional as F\n-from datasets import load_dataset\n+ import torch\n+ import torch.nn.functional as F\n+ from datasets import load_dataset\n \n + from accelerate import Accelerator\n + accelerator = Accelerator()\n@@ -104,28 +104,28 @@ from datasets import load_dataset\n \n + model = torch.nn.Transformer()\n - model = torch.nn.Transformer().to(device)\n-optim = torch.optim.Adam(model.parameters())\n+ optim = torch.optim.Adam(model.parameters())\n \n-dataset = load_dataset('my_dataset')\n-data = torch.utils.data.Dataloader(dataset)\n+ dataset = load_dataset('my_dataset')\n+ data = torch.utils.data.Dataloader(dataset)\n \n + model, optim, data = accelerator.prepare(model, optim, data)\n \n-model.train()\n-for epoch in range(10):\n- for source, targets in data:\n+ model.train()\n+ for epoch in range(10):\n+ for source, targets in data:\n - source = source.to(device)\n - targets = targets.to(device)\n \n- optimizer.zero_grad()\n+ optimizer.zero_grad()\n \n- output = model(source, targets)\n- loss = F.cross_entropy(output, targets)\n+ output = model(source, targets)\n+ loss = F.cross_entropy(output, targets)\n \n + accelerator.backward(loss)\n - loss.backward()\n \n- optimizer.step()\n+ optimizer.step()\n ```\n \n ## Launching script\ndiff --git a/docs/source/index.rst b/docs/source/index.rst\nindex 57933888d..efdeb8f88 100644\n--- a/docs/source/index.rst\n+++ b/docs/source/index.rst\n@@ -47,53 +47,58 @@ A traditional training loop in PyTorch looks like this:\n \n Changing it to work with accelerate is really easy and only adds a few lines of code:\n \n-.. code-block:: python\n-\n- from accelerate import Accelerator\n-\n- accelerator = Accelerator()\n- # Use the device given by the `accelerator` object.\n- device = accelerator.device\n- my_model.to(device)\n- # Pass every important object (model, optimizer, dataloader) to `accelerator.prepare`\n- my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n- my_model, my_optimizer, my_training_dataloader\n- )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- inputs = inputs.to(device)\n- targets = targets.to(device)\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n- accelerate.backward(loss)\n- my_optimizer.step()\n+.. 
code-block:: diff\n+\n+ + from accelerate import Accelerator\n+\n+ + accelerator = Accelerator()\n+ # Use the device given by the `accelerator` object.\n+ + device = accelerator.device\n+ my_model.to(device)\n+ # Pass every important object (model, optimizer, dataloader) to `accelerator.prepare`\n+ + my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n+ + my_model, my_optimizer, my_training_dataloader\n+ + )\n+\n+ for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ inputs = inputs.to(device)\n+ targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ # Just a small change for the backward instruction\n+ - loss.backward()\n+ + accelerate.backward(loss)\n+ my_optimizer.step()\n \n and with this, your script can now run in a distributed environment (multi-GPU, TPU).\n \n You can even simplify your script a bit by letting πŸ€— Accelerate handle the device placement for you (which is safer,\n especially for TPU training):\n \n-.. code-block:: python\n-\n- from accelerate import Accelerator\n-\n- accelerator = Accelerator()\n- # Pass every important object (model, optimizer, dataloader) to `accelerator.prepare`\n- my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n- my_model, my_optimizer, my_training_dataloader\n- )\n-\n- for batch in my_training_dataloader:\n- my_optimizer.zero_grad()\n- inputs, targets = batch\n- outputs = my_model(inputs)\n- loss = my_loss_function(outputs, targets)\n- # Just a small change for the backward instruction\n- accelerate.backward(loss)\n- my_optimizer.step()\n+.. code-block:: diff\n+\n+ + from accelerate import Accelerator\n+\n+ + accelerator = Accelerator()\n+ - my_model.to(device)\n+ # Pass every important object (model, optimizer, dataloader) to `accelerator.prepare`\n+ + my_model, my_optimizer, my_training_dataloader = accelerate.prepare(\n+ + my_model, my_optimizer, my_training_dataloader\n+ + )\n+\n+ for batch in my_training_dataloader:\n+ my_optimizer.zero_grad()\n+ inputs, targets = batch\n+ - inputs = inputs.to(device)\n+ - targets = targets.to(device)\n+ outputs = my_model(inputs)\n+ loss = my_loss_function(outputs, targets)\n+ # Just a small change for the backward instruction\n+ - loss.backward()\n+ + accelerate.backward(loss)\n+ my_optimizer.step()\n \n \n Script launcher\ndiff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 31c94ef81..1d56f9e4b 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -359,7 +359,7 @@ The random number generator synchronization will by default synchronize:\n - the main random number generator in PyTorch <=1.5.1\n \n You can choose which random number generator(s) to synchronize with the :obj:`rng_types` argument of the main\n-:class:`~accelerate.Accelerator`. In PyTorch >= 1.6, it is recommended to rely on local :obj:`generator`s to avoid\n+:class:`~accelerate.Accelerator`. In PyTorch >= 1.6, it is recommended to rely on local :obj:`generator` to avoid\n setting the same seed in the main random number generator in all processes.\n \n .. 
Warning::\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/22", "pr_id": 604022867 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex e67a4cee1..a8c11b887 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -198,10 +198,10 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index\n if pad_first:\n indices = tuple(\n- slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(size))\n+ slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))\n )\n else:\n- indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(size)))\n+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))\n new_tensor[indices] = tensor\n return new_tensor\n \n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/21", "pr_id": 602836841 }, { "diff": "diff --git a/README.md b/README.md\nindex 219b93248..68d6866b5 100644\n--- a/README.md\n+++ b/README.md\n@@ -52,36 +52,23 @@ limitations under the License.\n \n Here is an example:\n \n-<table>\n-<tr>\n-<th> Original training code <br> (CPU or mono-GPU only)</th>\n-<th> With Accelerate <br> (CPU/GPU/multi-GPUs/TPUs/fp16) </th>\n-</tr>\n-<tr>\n-<td>\n-\n-```python\n+```diff\n import torch\n import torch.nn.functional as F\n from datasets import load_dataset\n \n-\n-\n-device = 'cpu'\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+- device = 'cpu'\n++ device = accelerator.device\n \n model = torch.nn.Transformer().to(device)\n-optim = torch.optim.Adam(\n- model.parameters()\n-)\n+optim = torch.optim.Adam(model.parameters())\n \n dataset = load_dataset('my_dataset')\n-data = torch.utils.data.Dataloader(\n- dataset\n-)\n-\n-\n-\n+data = torch.utils.data.Dataloader(dataset)\n \n++ model, optim, data = accelerator.prepare(model, optim, data)\n \n model.train()\n for epoch in range(10):\n@@ -92,166 +79,55 @@ for epoch in range(10):\n optimizer.zero_grad()\n \n output = model(source, targets)\n- loss = F.cross_entropy(\n- output, targets\n- )\n-\n- loss.backward()\n-\n- optimizer.step()\n-```\n-\n-</td>\n-<td>\n-\n-```python\n- import torch\n- import torch.nn.functional as F\n- from datasets import load_dataset\n-\n-+ from accelerate import Accelerator\n-+ accelerator = Accelerator()\n-+ device = accelerator.device\n-\n- model = torch.nn.Transformer().to(device)\n- optim = torch.optim.Adam(\n- model.parameters()\n- )\n-\n- dataset = load_dataset('my_dataset')\n- data = torch.utils.data.Dataloader(\n- dataset\n- )\n-\n-+ model, optim, data = accelerator.prepare(\n-+ model, optim, data\n-+ )\n-\n- model.train()\n- for epoch in range(10):\n- for source, targets in data:\n- source = source.to(device)\n- targets = targets.to(device)\n-\n- optimizer.zero_grad()\n-\n- output = model(source, targets)\n- loss = F.cross_entropy(\n- output, targets\n- )\n+ loss = F.cross_entropy(output, targets)\n \n + accelerator.backward(loss)\n+- loss.backward()\n \n- optimizer.step()\n+ optimizer.step()\n ```\n \n-</td>\n-</tr>\n-</table>\n-\n As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp16).\n \n 
In particular, the same code can then be run without modification on your local machine for debugging or your training environment.\n \n πŸ€— Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can even simplify your training loop further:\n \n-<table>\n-<tr>\n-<th> Original training code <br> (CPU or mono-GPU only)</th>\n-<th> With Accelerate <br> (CPU/GPU/multi-GPUs/TPUs/fp16) </th>\n-</tr>\n-<tr>\n-<td>\n-\n-```python\n+```diff\n import torch\n import torch.nn.functional as F\n from datasets import load_dataset\n \n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n+- device = 'cpu'\n \n-\n-device = 'cpu'\n-\n-model = torch.nn.Transformer().to(device)\n-optim = torch.optim.Adam(\n- model.parameters()\n-)\n++ model = torch.nn.Transformer()\n+- model = torch.nn.Transformer().to(device)\n+optim = torch.optim.Adam(model.parameters())\n \n dataset = load_dataset('my_dataset')\n-data = torch.utils.data.Dataloader(\n- dataset\n-)\n-\n-\n-\n+data = torch.utils.data.Dataloader(dataset)\n \n++ model, optim, data = accelerator.prepare(model, optim, data)\n \n model.train()\n for epoch in range(10):\n for source, targets in data:\n- source = source.to(device)\n- targets = targets.to(device)\n+- source = source.to(device)\n+- targets = targets.to(device)\n \n optimizer.zero_grad()\n \n output = model(source, targets)\n- loss = F.cross_entropy(\n- output, targets\n- )\n-\n- loss.backward()\n-\n- optimizer.step()\n-```\n-\n-</td>\n-<td>\n-\n-```python\n- import torch\n- import torch.nn.functional as F\n- from datasets import load_dataset\n-\n-+ from accelerate import Accelerator\n-+ accelerator = Accelerator()\n--\n-\n-+ model = torch.nn.Transformer()\n- optim = torch.optim.Adam(\n- model.parameters()\n- )\n-\n- dataset = load_dataset('my_dataset')\n- data = torch.utils.data.Dataloader(\n- dataset\n- )\n-\n-+ model, optim, data = accelerator.prepare(\n-+ model, optim, data\n-+ )\n-\n- model.train()\n- for epoch in range(10):\n- for source, targets in data:\n--\n--\n-\n- optimizer.zero_grad()\n-\n- output = model(source, targets)\n- loss = F.cross_entropy(\n- output, targets\n- )\n+ loss = F.cross_entropy(output, targets)\n \n + accelerator.backward(loss)\n+- loss.backward()\n \n- optimizer.step()\n+ optimizer.step()\n ```\n \n-</td>\n-</tr>\n-</table>\n-\n ## Launching script\n \n πŸ€— Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training!\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/20", "pr_id": 601929380 }, { "diff": "diff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 1e3ac4e89..31c94ef81 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -130,6 +130,13 @@ do with the :meth:`~accelerate.Accelerator.gather` method.\n Any instruction using your training dataloader length (for instance if you need the number of total training steps\n to create a learning rate scheduler) should go after the call to :meth:`~accelerate.Accelerator.prepare`.\n \n+.. Warning::\n+\n+ The :meth:`~accelerate.Accelerator.gather` method requires the tensors to be all the same size on each process. 
If\n+ you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in\n+ a batch), you should use the :meth:`~accelerate.Accelerator.pad_across_processes` method to pad you tensor to the\n+ biggest size across processes.\n+\n \n Launching your distributed script\n -----------------------------------------------------------------------------------------------------------------------\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex d9f7cf1bc..75539559e 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -22,7 +22,7 @@\n from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, KwargsHandler\n from .optimizer import AcceleratedOptimizer\n from .state import AcceleratorState, DistributedType\n-from .utils import RNGType, extract_model_from_parallel, gather, save, wait_for_everyone\n+from .utils import RNGType, extract_model_from_parallel, gather, pad_across_processes, save, wait_for_everyone\n \n \n class Accelerator:\n@@ -289,6 +289,23 @@ def gather(self, tensor):\n \"\"\"\n return gather(tensor)\n \n+ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):\n+ \"\"\"\n+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so\n+ they can safely be gathered.\n+\n+ Args:\n+ tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ The data to gather.\n+ dim (:obj:`int`, `optional`, defaults to 0):\n+ The dimension on which to pad.\n+ pad_index (:obj:`int`, `optional`, defaults to 0):\n+ The value with which to pad.\n+ pad_first (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ Whether to pad at the beginning or the end.\n+ \"\"\"\n+ return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)\n+\n def unwrap_model(self, model):\n \"\"\"\n Unwraps the :obj:`model` from the additional layer possible added by :meth:`~accelerate.Accelerator.prepare`.\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex f3a9ae92a..96368ba7b 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -21,7 +21,6 @@\n import subprocess\n import sys\n from pathlib import Path\n-from typing import Optional\n \n from accelerate.commands.config import LaunchConfig, default_config_file\n from accelerate.state import DistributedType\n@@ -68,9 +67,7 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--machine_rank\", type=int, default=0, help=\"The rank of the machine on which this script is launched.\"\n )\n- parser.add_argument(\n- \"--main_process_ip\", type=str, default=None, help=\"The IP address of the machine of rank 0.\"\n- )\n+ parser.add_argument(\"--main_process_ip\", type=str, default=None, help=\"The IP address of the machine of rank 0.\")\n parser.add_argument(\n \"--main_process_port\",\n type=int,\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 61a39fc95..e67a4cee1 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -148,7 +148,7 @@ def _gpu_gather(tensor):\n \n def gather(tensor):\n \"\"\"\n- Recusrively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n+ Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n \n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n@@ -165,6 +165,47 @@ def gather(tensor):\n return tensor\n \n \n+def 
pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):\n+ \"\"\"\n+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they\n+ can safely be gathered.\n+\n+ Args:\n+ tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n+ The data to gather.\n+ dim (:obj:`int`, `optional`, defaults to 0):\n+ The dimension on which to pad.\n+ pad_index (:obj:`int`, `optional`, defaults to 0):\n+ The value with which to pad.\n+ pad_first (:obj:`bool`, `optional`, defaults to :obj:`False`):\n+ Whether to pad at the beginning or the end.\n+ \"\"\"\n+ if isinstance(tensor, (list, tuple)):\n+ return type(tensor)(pad_across_processes(t, dim=dim, pad_index=pad_index) for t in tensor)\n+ elif isinstance(tensor, dict):\n+ return type(tensor)({k: pad_across_processes(v, dim=dim, pad_index=pad_index) for k, v in tensor.items()})\n+ elif not isinstance(tensor, torch.Tensor):\n+ raise TypeError(f\"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n+\n+ # Gather all sizes\n+ size = torch.tensor(tensor.shape, device=tensor.device)[None]\n+ sizes = gather(size).cpu()\n+ # Then pad to the maximum size\n+ max_size = max(s[dim] for s in sizes)\n+ old_size = tensor.shape\n+ new_size = list(old_size)\n+ new_size[dim] = max_size\n+ new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index\n+ if pad_first:\n+ indices = tuple(\n+ slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(size))\n+ )\n+ else:\n+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(size)))\n+ new_tensor[indices] = tensor\n+ return new_tensor\n+\n+\n def wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\ndiff --git a/tests/test_multigpu.py b/tests/test_multigpu.py\nindex ce69a3db3..7ffbd94f8 100644\n--- a/tests/test_multigpu.py\n+++ b/tests/test_multigpu.py\n@@ -20,6 +20,7 @@\n import torch\n \n import accelerate\n+from accelerate import Accelerator\n from accelerate.test_utils import execute_subprocess_async, require_multi_gpu\n \n \n@@ -39,3 +40,43 @@ def test_multi_gpu(self):\n \"\"\".split()\n cmd = [sys.executable] + distributed_args\n execute_subprocess_async(cmd, env=os.environ.copy())\n+\n+ @require_multi_gpu\n+ def test_pad_across_processes(self):\n+ distributed_args = f\"\"\"\n+ -m torch.distributed.launch\n+ --nproc_per_node={torch.cuda.device_count()}\n+ --use_env\n+ {inspect.getfile(self.__class__)}\n+ \"\"\".split()\n+ cmd = [sys.executable] + distributed_args\n+ execute_subprocess_async(cmd, env=os.environ.copy())\n+\n+\n+if __name__ == \"__main__\":\n+ accelerator = Accelerator()\n+ shape = (accelerator.state.process_index + 2, 10)\n+ tensor = torch.randint(0, 10, shape).to(accelerator.device)\n+\n+ error_msg = \"\"\n+\n+ tensor1 = accelerator.pad_across_processes(tensor)\n+ if tensor1.shape[0] != accelerator.state.num_processes + 1:\n+ error_msg += f\"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0.\"\n+ if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):\n+ error_msg += \"Tensors have different values.\"\n+ if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):\n+ error_msg += \"Padding was not done with the right value (0).\"\n+\n+ tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)\n+ if tensor2.shape[0] != accelerator.state.num_processes + 1:\n+ error_msg 
+= f\"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0.\"\n+ index = accelerator.state.num_processes - accelerator.state.process_index - 1\n+ if not torch.equal(tensor2[index:], tensor):\n+ error_msg += \"Tensors have different values.\"\n+ if not torch.all(tensor2[:index] == 0):\n+ error_msg += \"Padding was not done with the right value (0).\"\n+\n+ # Raise error at the end to make sure we don't stop at the first failure.\n+ if len(error_msg) > 0:\n+ raise ValueError(error_msg)\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/19", "pr_id": 601823127 }, { "diff": "diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml\nnew file mode 100644\nindex 000000000..9b54a269f\n--- /dev/null\n+++ b/.github/workflows/quality.yml\n@@ -0,0 +1,17 @@\n+name: Quality Check\n+\n+on: [pull_request]\n+\n+jobs:\n+ quality:\n+ runs-on: ubuntu-latest\n+ steps:\n+ - uses: actions/checkout@v2\n+ - name: Set up Python 3.6\n+ uses: actions/setup-python@v2\n+ with:\n+ python-version: 3.6\n+ - name: Install Python dependencies\n+ run: pip install -e .[quality]\n+ - name: Run Quality check\n+ run: make quality\n\\ No newline at end of file\ndiff --git a/.github/workflows/test.yml b/.github/workflows/test.yml\nnew file mode 100644\nindex 000000000..aa04063d8\n--- /dev/null\n+++ b/.github/workflows/test.yml\n@@ -0,0 +1,17 @@\n+name: Run Tests\n+\n+on: [pull_request]\n+\n+jobs:\n+ test:\n+ runs-on: ubuntu-latest\n+ steps:\n+ - uses: actions/checkout@v2\n+ - name: Set up Python 3.6\n+ uses: actions/setup-python@v2\n+ with:\n+ python-version: 3.6\n+ - name: Install Python dependencies\n+ run: pip install -e .[test]\n+ - name: Run Tests\n+ run: make test\n\\ No newline at end of file\ndiff --git a/setup.py b/setup.py\nindex 7a7a6a9cd..0fdc5e785 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -17,7 +17,21 @@\n \n extras = {}\n extras[\"quality\"] = [\"black >= 20.8b1\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n-extras[\"docs\"] = [\"recommonmark\", \"sphinx==3.2.1\", \"sphinx-markdown-tables\", \"sphinx-rtd-theme==0.4.3\", \"sphinx-copybutton\"]\n+extras[\"docs\"] = [\n+ \"recommonmark\",\n+ \"sphinx==3.2.1\",\n+ \"sphinx-markdown-tables\",\n+ \"sphinx-rtd-theme==0.4.3\",\n+ \"sphinx-copybutton\",\n+]\n+extras[\"test\"] = [\n+ \"pytest\",\n+ \"pytest-xdist\",\n+]\n+\n+extras[\"sagemaker\"] = [\n+ \"sagemaker\", # boto3 is a required package in sagemaker\n+]\n \n setup(\n name=\"accelerate\",\n@@ -32,11 +46,13 @@\n url=\"https://github.com/huggingface/accelerate\",\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n- entry_points={\"console_scripts\": [\n- \"accelerate=accelerate.commands.accelerate_cli:main\",\n- \"accelerate-config=accelerate.commands.config:main\",\n- \"accelerate-launch=accelerate.commands.launch:main\",\n- ]},\n+ entry_points={\n+ \"console_scripts\": [\n+ \"accelerate=accelerate.commands.accelerate_cli:main\",\n+ \"accelerate-config=accelerate.commands.config:main\",\n+ \"accelerate-launch=accelerate.commands.launch:main\",\n+ ]\n+ },\n python_requires=\">=3.6.0\",\n install_requires=[\"torch>=1.4.0\"],\n extras_require=extras,\ndiff --git a/src/accelerate/commands/config.py b/src/accelerate/commands/config.py\ndeleted file mode 100644\nindex c32905932..000000000\n--- a/src/accelerate/commands/config.py\n+++ /dev/null\n@@ -1,218 +0,0 @@\n-#!/usr/bin/env python\n-\n-# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-import argparse\n-import json\n-import os\n-from dataclasses import dataclass\n-from enum import Enum\n-from typing import Optional\n-\n-import yaml\n-from accelerate.state import DistributedType\n-\n-\n-hf_cache_home = os.path.expanduser(\n- os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n-)\n-cache_dir = os.path.join(hf_cache_home, \"accelerate\")\n-default_json_config_file = os.path.join(cache_dir, \"default_config.json\")\n-default_yaml_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n-\n-# For backward compatibility: the default config is the json one if it's the only existing file.\n-if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):\n- default_config_file = default_yaml_config_file\n-else:\n- default_config_file = default_json_config_file\n-\n-\n-@dataclass\n-class LaunchConfig:\n- distributed_type: DistributedType\n- num_processes: int\n- fp16: bool\n- machine_rank: int = 0\n- num_machines: int = 1\n- main_process_ip: Optional[str] = None\n- main_process_port: Optional[int] = None\n- main_training_function: str = \"main\"\n-\n- def __post_init__(self):\n- if isinstance(self.distributed_type, str):\n- self.distributed_type = DistributedType(self.distributed_type)\n-\n- def to_dict(self):\n- result = self.__dict__\n- # For serialization, it's best to convert Enums to strings (or their underlying value type).\n- for key, value in result.items():\n- if isinstance(value, Enum):\n- result[key] = value.value\n- return result\n-\n- @classmethod\n- def from_json_file(cls, json_file=None):\n- json_file = default_json_config_file if json_file is None else json_file\n- with open(json_file, \"r\", encoding=\"utf-8\") as f:\n- return cls(**json.load(f))\n-\n- def to_json_file(self, json_file):\n- with open(json_file, \"w\", encoding=\"utf-8\") as f:\n- content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n- f.write(content)\n-\n- @classmethod\n- def from_yaml_file(cls, yaml_file=None):\n- yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n- with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n- return cls(**yaml.safe_load(f))\n-\n- def to_yaml_file(self, yaml_file):\n- with open(yaml_file, \"w\", encoding=\"utf-8\") as f:\n- yaml.safe_dump(self.to_dict(), f)\n-\n-\n-def config_command_parser(subparsers=None):\n- if subparsers is not None:\n- parser = subparsers.add_parser(\"config\")\n- else:\n- parser = argparse.ArgumentParser(\"Accelerate config command\")\n-\n- parser.add_argument(\n- \"--config_file\",\n- default=None,\n- help=(\n- \"The path to use to store the config file. 
Will default to a file named default_config.json in the cache \"\n- \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n- \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n- \"with 'huggingface'.\"\n- ),\n- )\n-\n- if subparsers is not None:\n- parser.set_defaults(func=config_command)\n- return parser\n-\n-\n-def _ask_field(input_text, convert_value=None, default=None, error_message=None):\n- ask_again = True\n- while ask_again:\n- result = input(input_text)\n- try:\n- if default is not None and len(result) == 0:\n- return default\n- return convert_value(result) if convert_value is not None else result\n- except:\n- if error_message is not None:\n- print(error_message)\n-\n-\n-def get_user_input():\n- def _convert_distributed_mode(value):\n- value = int(value)\n- return DistributedType([\"NO\", \"MULTI_GPU\", \"TPU\"][value])\n-\n- def _convert_yes_no_to_bool(value):\n- return {\"yes\": True, \"no\": False}[value.lower()]\n-\n- distributed_type = _ask_field(\n- \"Which type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): \",\n- _convert_distributed_mode,\n- error_message=\"Please enter 0, 1 or 2.\",\n- )\n-\n- machine_rank = 0\n- num_machines = 1\n- main_process_ip = None\n- main_process_port = None\n- if distributed_type == DistributedType.MULTI_GPU:\n- num_machines = _ask_field(\n- \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n- lambda x: int(x),\n- default=1,\n- )\n- if num_machines > 1:\n- machine_rank = _ask_field(\n- \"What is the rank of this machine (from 0 to the number of machines - 1 )? [0]: \",\n- lambda x: int(x),\n- default=0,\n- )\n- main_process_ip = _ask_field(\n- \"What is the IP address of the machine that will host the main process? \",\n- )\n- main_process_ip = _ask_field(\n- \"What is the port you will use to communicate with the main process? \",\n- lambda x: int(x),\n- )\n- if distributed_type == DistributedType.TPU:\n- main_training_function = _ask_field(\n- \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\n- default=\"main\",\n- )\n- else:\n- main_training_function = \"main\"\n-\n- num_processes = _ask_field(\n- \"How many processes in total will you use? [1]: \",\n- lambda x: int(x),\n- default=1,\n- error_message=\"Please enter an integer.\",\n- )\n-\n- if distributed_type != DistributedType.TPU:\n- fp16 = _ask_field(\n- \"Do you wish to use FP16 (mixed precision)? 
[yes/NO]: \",\n- _convert_yes_no_to_bool,\n- default=False,\n- error_message=\"Please enter yes or no.\",\n- )\n- else:\n- fp16 = False\n-\n- return LaunchConfig(\n- distributed_type=distributed_type,\n- num_processes=num_processes,\n- fp16=fp16,\n- machine_rank=machine_rank,\n- num_machines=num_machines,\n- main_process_ip=main_process_ip,\n- main_process_port=main_process_port,\n- main_training_function=main_training_function,\n- )\n-\n-\n-def config_command(args):\n- config = get_user_input()\n- if args.config_file is not None:\n- config_file = args.config_file\n- else:\n- if not os.path.isdir(cache_dir):\n- os.makedirs(cache_dir)\n- config_file = default_yaml_config_file\n-\n- if config_file.endswith(\".json\"):\n- config.to_json_file(config_file)\n- else:\n- config.to_yaml_file(config_file)\n-\n-\n-def main():\n- parser = config_command_parser()\n- args = parser.parse_args()\n- config_command(args)\n-\n-\n-if __name__ == \"__main__\":\n- main()\ndiff --git a/src/accelerate/commands/config/__init__.py b/src/accelerate/commands/config/__init__.py\nnew file mode 100644\nindex 000000000..48ea96ab1\n--- /dev/null\n+++ b/src/accelerate/commands/config/__init__.py\n@@ -0,0 +1,85 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+\n+from accelerate.state import ComputeEnvironment\n+\n+from .cluster import get_cluster_input\n+from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401\n+from .config_utils import _ask_field, _convert_compute_environment\n+from .sagemaker import get_sagemaker_input\n+\n+\n+def get_user_input():\n+ compute_environment = _ask_field(\n+ \"In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): \",\n+ _convert_compute_environment,\n+ error_message=\"Please enter 0 or 1\",\n+ )\n+ if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\n+ config = get_sagemaker_input()\n+ else:\n+ config = get_cluster_input()\n+ return config\n+\n+\n+def config_command_parser(subparsers=None):\n+ if subparsers is not None:\n+ parser = subparsers.add_parser(\"config\")\n+ else:\n+ parser = argparse.ArgumentParser(\"Accelerate config command\")\n+\n+ parser.add_argument(\n+ \"--config_file\",\n+ default=None,\n+ help=(\n+ \"The path to use to store the config file. 
Will default to a file named default_config.json in the cache \"\n+ \"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have \"\n+ \"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed \"\n+ \"with 'huggingface'.\"\n+ ),\n+ )\n+\n+ if subparsers is not None:\n+ parser.set_defaults(func=config_command)\n+ return parser\n+\n+\n+def config_command(args):\n+ config = get_user_input()\n+ if args.config_file is not None:\n+ config_file = args.config_file\n+ else:\n+ if not os.path.isdir(cache_dir):\n+ os.makedirs(cache_dir)\n+ config_file = default_yaml_config_file\n+\n+ if config_file.endswith(\".json\"):\n+ config.to_json_file(config_file)\n+ else:\n+ config.to_yaml_file(config_file)\n+\n+\n+def main():\n+ parser = config_command_parser()\n+ args = parser.parse_args()\n+ config_command(args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\nnew file mode 100644\nindex 000000000..a9e412f7d\n--- /dev/null\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -0,0 +1,88 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from accelerate.state import ComputeEnvironment, DistributedType\n+\n+from .config_args import ClusterConfig\n+from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n+\n+\n+def get_cluster_input():\n+ distributed_type = _ask_field(\n+ \"Which type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): \",\n+ _convert_distributed_mode,\n+ error_message=\"Please enter 0, 1 or 2.\",\n+ )\n+\n+ machine_rank = 0\n+ num_machines = 1\n+ main_process_ip = None\n+ main_process_port = None\n+ if distributed_type == DistributedType.MULTI_GPU:\n+ num_machines = _ask_field(\n+ \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n+ lambda x: int(x),\n+ default=1,\n+ )\n+ if num_machines > 1:\n+ machine_rank = _ask_field(\n+ \"What is the rank of this machine (from 0 to the number of machines - 1 )? [0]: \",\n+ lambda x: int(x),\n+ default=0,\n+ )\n+ main_process_ip = _ask_field(\n+ \"What is the IP address of the machine that will host the main process? \",\n+ )\n+ main_process_ip = _ask_field(\n+ \"What is the port you will use to communicate with the main process? \",\n+ lambda x: int(x),\n+ )\n+ if distributed_type == DistributedType.TPU:\n+ main_training_function = _ask_field(\n+ \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\n+ default=\"main\",\n+ )\n+ else:\n+ main_training_function = \"main\"\n+\n+ num_processes = _ask_field(\n+ \"How many processes in total will you use? 
[1]: \",\n+ lambda x: int(x),\n+ default=1,\n+ error_message=\"Please enter an integer.\",\n+ )\n+\n+ if distributed_type != DistributedType.TPU:\n+ fp16 = _ask_field(\n+ \"Do you wish to use FP16 (mixed precision)? [yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+ else:\n+ fp16 = False\n+\n+ return ClusterConfig(\n+ compute_environment=ComputeEnvironment.LOCAL_MACHINE,\n+ distributed_type=distributed_type,\n+ num_processes=num_processes,\n+ fp16=fp16,\n+ machine_rank=machine_rank,\n+ num_machines=num_machines,\n+ main_process_ip=main_process_ip,\n+ main_process_port=main_process_port,\n+ main_training_function=main_training_function,\n+ )\ndiff --git a/src/accelerate/commands/config/config_args.py b/src/accelerate/commands/config/config_args.py\nnew file mode 100644\nindex 000000000..5b0dd00ba\n--- /dev/null\n+++ b/src/accelerate/commands/config/config_args.py\n@@ -0,0 +1,119 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import json\n+import os\n+from dataclasses import dataclass\n+from enum import Enum\n+from typing import Optional, Union\n+\n+import yaml\n+from accelerate.state import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+\n+\n+hf_cache_home = os.path.expanduser(\n+ os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n+)\n+cache_dir = os.path.join(hf_cache_home, \"accelerate\")\n+default_json_config_file = os.path.join(cache_dir, \"default_config.json\")\n+default_yaml_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n+\n+# For backward compatibility: the default config is the json one if it's the only existing file.\n+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):\n+ default_config_file = default_yaml_config_file\n+else:\n+ default_config_file = default_json_config_file\n+\n+\n+def load_config_from_file(config_file):\n+ config_file = config_file if config_file is not None else default_config_file\n+ with open(config_file, \"r\", encoding=\"utf-8\") as f:\n+ if config_file.endswith(\".json\"):\n+ if json.load(f)[\"compute_environment\"] == ComputeEnvironment.LOCAL_MACHINE:\n+ config_class = ClusterConfig\n+ else:\n+ config_class = SageMakerConfig\n+ return config_class.from_json_file(json_file=config_file)\n+ else:\n+ if yaml.safe_load(f)[\"compute_environment\"] == ComputeEnvironment.LOCAL_MACHINE:\n+ config_class = ClusterConfig\n+ else:\n+ config_class = SageMakerConfig\n+ return config_class.from_yaml_file(yaml_file=config_file)\n+\n+\n+@dataclass\n+class BaseConfig:\n+ compute_environment: ComputeEnvironment\n+ distributed_type: Union[DistributedType, SageMakerDistributedType]\n+ fp16: bool\n+\n+ def to_dict(self):\n+ result = self.__dict__\n+ # For serialization, it's best to convert Enums to strings (or their underlying value type).\n+ for key, value in result.items():\n+ if 
isinstance(value, Enum):\n+ result[key] = value.value\n+ return result\n+\n+ @classmethod\n+ def from_json_file(cls, json_file=None):\n+ json_file = default_json_config_file if json_file is None else json_file\n+ with open(json_file, \"r\", encoding=\"utf-8\") as f:\n+ return cls(**json.load(f))\n+\n+ def to_json_file(self, json_file):\n+ with open(json_file, \"w\", encoding=\"utf-8\") as f:\n+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n+ f.write(content)\n+\n+ @classmethod\n+ def from_yaml_file(cls, yaml_file=None):\n+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n+ with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n+ return cls(**yaml.safe_load(f))\n+\n+ def to_yaml_file(self, yaml_file):\n+ with open(yaml_file, \"w\", encoding=\"utf-8\") as f:\n+ yaml.safe_dump(self.to_dict(), f)\n+\n+ def __post_init__(self):\n+ if isinstance(self.compute_environment, str):\n+ self.compute_environment = ComputeEnvironment(self.compute_environment)\n+ if isinstance(self.distributed_type, str):\n+ self.distributed_type = DistributedType(self.distributed_type)\n+\n+\n+@dataclass\n+class ClusterConfig(BaseConfig):\n+ num_processes: int\n+ machine_rank: int = 0\n+ num_machines: int = 1\n+ main_process_ip: Optional[str] = None\n+ main_process_port: Optional[int] = None\n+ main_training_function: str = \"main\"\n+\n+\n+@dataclass\n+class SageMakerConfig(BaseConfig):\n+ ec2_instance_type: str\n+ iam_role_name: str\n+ profile: Optional[str] = None\n+ region: str = \"us-east-1\"\n+ num_machines: int = 1\n+ base_job_name: str = f\"accelerate-sagemaker-{num_machines}\"\n+ pytroch_version: str = \"1.6\"\n+ transformers_version: str = \"4.4\"\ndiff --git a/src/accelerate/commands/config/config_utils.py b/src/accelerate/commands/config/config_utils.py\nnew file mode 100644\nindex 000000000..dd14965f7\n--- /dev/null\n+++ b/src/accelerate/commands/config/config_utils.py\n@@ -0,0 +1,49 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from accelerate.state import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+\n+\n+def _ask_field(input_text, convert_value=None, default=None, error_message=None):\n+ ask_again = True\n+ while ask_again:\n+ result = input(input_text)\n+ try:\n+ if default is not None and len(result) == 0:\n+ return default\n+ return convert_value(result) if convert_value is not None else result\n+ except:\n+ if error_message is not None:\n+ print(error_message)\n+\n+\n+def _convert_compute_environment(value):\n+ value = int(value)\n+ return ComputeEnvironment([\"LOCAL_MACHINE\", \"AMAZON_SAGEMAKER\"][value])\n+\n+\n+def _convert_distributed_mode(value):\n+ value = int(value)\n+ return DistributedType([\"NO\", \"MULTI_GPU\", \"TPU\"][value])\n+\n+\n+def _convert_sagemaker_distributed_mode(value):\n+ value = int(value)\n+ return SageMakerDistributedType([\"NO\", \"DATA_PARALLEL\", \"MODEL_PARALLEL\"][value])\n+\n+\n+def _convert_yes_no_to_bool(value):\n+ return {\"yes\": True, \"no\": False}[value.lower()]\ndiff --git a/src/accelerate/commands/config/sagemaker.py b/src/accelerate/commands/config/sagemaker.py\nnew file mode 100644\nindex 000000000..25fd3e6cb\n--- /dev/null\n+++ b/src/accelerate/commands/config/sagemaker.py\n@@ -0,0 +1,157 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import json\n+import os\n+\n+from accelerate.state import ComputeEnvironment, SageMakerDistributedType\n+from accelerate.utils import is_boto3_available\n+\n+from .config_args import SageMakerConfig\n+from .config_utils import _ask_field, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool\n+\n+\n+if is_boto3_available():\n+ import boto3 # noqa: F401\n+\n+\n+def _create_iam_role_for_sagemaker(role_name):\n+ iam_client = boto3.client(\"iam\")\n+\n+ sagemaker_trust_policy = {\n+ \"Version\": \"2012-10-17\",\n+ \"Statement\": [\n+ {\"Effect\": \"Allow\", \"Principal\": {\"Service\": \"sagemaker.amazonaws.com\"}, \"Action\": \"sts:AssumeRole\"}\n+ ],\n+ }\n+ try:\n+ # create the role, associated with the chosen trust policy\n+ iam_client.create_role(\n+ RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)\n+ )\n+ policy_document = {\n+ \"Version\": \"2012-10-17\",\n+ \"Statement\": [\n+ {\n+ \"Effect\": \"Allow\",\n+ \"Action\": [\n+ \"sagemaker:*\",\n+ \"ecr:GetDownloadUrlForLayer\",\n+ \"ecr:BatchGetImage\",\n+ \"ecr:BatchCheckLayerAvailability\",\n+ \"ecr:GetAuthorizationToken\",\n+ \"cloudwatch:PutMetricData\",\n+ \"cloudwatch:GetMetricData\",\n+ \"cloudwatch:GetMetricStatistics\",\n+ \"cloudwatch:ListMetrics\",\n+ \"logs:CreateLogGroup\",\n+ \"logs:CreateLogStream\",\n+ \"logs:DescribeLogStreams\",\n+ \"logs:PutLogEvents\",\n+ \"logs:GetLogEvents\",\n+ \"s3:CreateBucket\",\n+ \"s3:ListBucket\",\n+ \"s3:GetBucketLocation\",\n+ \"s3:GetObject\",\n+ \"s3:PutObject\",\n+ ],\n+ \"Resource\": \"*\",\n+ }\n+ ],\n+ }\n+ # attach policy to role\n+ iam_client.put_role_policy(\n+ RoleName=role_name,\n+ PolicyName=f\"{role_name}_policy_permission\",\n+ PolicyDocument=json.dumps(policy_document, indent=2),\n+ )\n+ except iam_client.exceptions.EntityAlreadyExistsException:\n+ print(f\"role {role_name} already exists. Using existing one\")\n+\n+\n+def _get_iam_role_arn(role_name):\n+ iam_client = boto3.client(\"iam\")\n+ return iam_client.get_role(RoleName=role_name)[\"Role\"][\"Arn\"]\n+\n+\n+def get_sagemaker_input():\n+ credentials_configuration = _ask_field(\n+ \"How do you want to authorize? 
([0] AWS Profile, [1] Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)): \",\n+ lambda x: int(x),\n+ )\n+ aws_profile = None\n+ if credentials_configuration == 0:\n+ aws_profile = _ask_field(\"Enter your AWS Profile name: [default] \", default=\"default\")\n+ os.environ[\"AWS_PROFILE\"] = aws_profile\n+ else:\n+ print(\n+ \"Accelerate will expose AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables on the machine\"\n+ )\n+ aws_access_key_id = _ask_field(\"AWS Access Key ID: \")\n+ os.environ[\"AWS_ACCESS_KEY_ID\"] = aws_access_key_id\n+\n+ aws_secret_access_key = _ask_field(\"AWS Secret Access Key: \")\n+ os.environ[\"AWS_SECRET_ACCESS_KEY\"] = aws_secret_access_key\n+\n+ aws_region = _ask_field(\"Enter your AWS Region: [us-east-1]\", default=\"us-east-1\")\n+ os.environ[\"AWS_DEFAULT_REGION\"] = aws_region\n+\n+ role_management = _ask_field(\n+ \"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs? ([0] provide IAM Role name, [1] create new IAM role using credentials: \",\n+ lambda x: int(x),\n+ )\n+ if role_management == 0:\n+ iam_role_name = _ask_field(\"Enter your IAM role name: \")\n+ else:\n+ iam_role_name = \"accelerate_sagemaker_execution_role\"\n+ print(f'Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials')\n+ _create_iam_role_for_sagemaker(iam_role_name)\n+\n+ distributed_type = _ask_field(\n+ \"Which type of machine are you using? ([0] No distributed training, [1] data parallelism, [2] model parallelism): \",\n+ _convert_sagemaker_distributed_mode,\n+ error_message=\"Please enter 0, 1 or 2\",\n+ )\n+\n+ # using the best two instances for single-gpu training or multi-gpu -> can turn into question to make it more diverse\n+ ec2_instance_type = \"ml.p3.2xlarge\" if distributed_type == SageMakerDistributedType.NO else \"ml.p3dn.24xlarge\"\n+ num_machines = 1\n+ if (\n+ distributed_type == SageMakerDistributedType.DATA_PARALLEL\n+ or distributed_type == SageMakerDistributedType.MODEL_PARALLEL\n+ ):\n+ raise NotImplementedError(\"Model or Data Parallelism is not implemented yet. We are working on it\")\n+ num_machines = _ask_field(\n+ \"How many machines do you want use? [2]: \",\n+ lambda x: int(x),\n+ default=2,\n+ )\n+ fp16 = _ask_field(\n+ \"Do you wish to use FP16 (mixed precision)? 
[yes/NO]: \",\n+ _convert_yes_no_to_bool,\n+ default=False,\n+ error_message=\"Please enter yes or no.\",\n+ )\n+\n+ return SageMakerConfig(\n+ compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,\n+ distributed_type=distributed_type,\n+ ec2_instance_type=ec2_instance_type,\n+ profile=aws_profile,\n+ region=aws_region,\n+ iam_role_name=iam_role_name,\n+ fp16=fp16,\n+ num_machines=num_machines,\n+ )\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex 061c289ea..b8e1e8156 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -23,8 +23,8 @@\n from pathlib import Path\n from typing import Optional\n \n-from accelerate.commands.config import LaunchConfig, default_config_file\n-from accelerate.state import DistributedType\n+from accelerate.commands.config import default_config_file, load_config_from_file\n+from accelerate.state import ComputeEnvironment, DistributedType\n \n \n class _AddOneArg:\n@@ -171,6 +171,12 @@ def tpu_launcher(args):\n xmp.spawn(main_function, args=(), nprocs=args.num_processes)\n \n \n+def sagemaker_launcher(sagemaker_config, args):\n+ raise NotImplementedError(\n+ \"Support for starting SageMaker training is not yet implemented. But you can create configs for it.\"\n+ )\n+\n+\n def launch_command(args):\n # Sanity checks\n if args.multi_gpu and args.tpu:\n@@ -178,15 +184,15 @@ def launch_command(args):\n \n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n- defaults = LaunchConfig.from_json_file(json_file=args.config_file)\n+ defaults = load_config_from_file(args.config_file)\n if not args.multi_gpu and not args.tpu:\n args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU\n args.tpu = defaults.distributed_type == DistributedType.TPU\n- if args.num_processes is None:\n+ if args.num_processes is None and defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n args.num_processes = defaults.num_processes\n if not args.fp16:\n args.fp16 = defaults.fp16\n- if args.main_training_function is None:\n+ if args.main_training_function is None and defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n args.main_training_function = defaults.main_training_function\n else:\n if args.num_processes is None:\n@@ -197,6 +203,8 @@ def launch_command(args):\n multi_gpu_launcher(args)\n elif args.tpu and not args.cpu:\n tpu_launcher(args)\n+ elif defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\n+ sagemaker_launcher(defaults, args)\n else:\n simple_launcher(args)\n \ndiff --git a/src/accelerate/state.py b/src/accelerate/state.py\nindex 5f150add9..55831be0f 100644\n--- a/src/accelerate/state.py\n+++ b/src/accelerate/state.py\n@@ -53,6 +53,38 @@ class DistributedType(str, Enum):\n TPU = \"TPU\"\n \n \n+class SageMakerDistributedType(str, Enum):\n+ \"\"\"\n+ Represents a type of distributed environment.\n+\n+ Values:\n+\n+ - **NO** -- Not a distributed environment, just a single process.\n+ - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.\n+ - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.\n+ NO = \"NO\"\n+ DATA_PARALLEL = \"DATA_PARALLEL\"\n+ MODEL_PARALLEL = \"MODEL_PARALLEL\"\n+\n+\n+class ComputeEnvironment(str, Enum):\n+ \"\"\"\n+ Represents a type of the compute environment.\n+\n+ Values:\n+\n+ - 
**LOCAL_MACHINE** -- private/custom cluster hardware.\n+ - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.\n+ LOCAL_MACHINE = \"LOCAL_MACHINE\"\n+ AMAZON_SAGEMAKER = \"AMAZON_SAGEMAKER\"\n+\n+\n # Inspired by Alex Martelli's 'Borg'.\n class AcceleratorState:\n \"\"\"\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 61a39fc95..ca2d558b0 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import importlib\n import random\n from enum import Enum\n from typing import List, Optional, Union\n@@ -26,6 +27,10 @@\n import torch_xla.core.xla_model as xm\n \n \n+def is_boto3_available():\n+ return importlib.util.find_spec(\"boto3\") is not None\n+\n+\n class RNGType(Enum):\n TORCH = \"torch\"\n CUDA = \"cuda\"\n", "code_comments": [ { "body": "decoupled input gathering from main config `get_user_input()` into the separate options", "diff_hunk": "@@ -0,0 +1,88 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from accelerate.state import ComputeEnvironment, DistributedType\n+\n+from .config_args import ClusterConfig\n+from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n+\n+\n+def get_cluster_input():", "from_author": true }, { "body": "Determines which config flow should be used. ", "diff_hunk": "@@ -0,0 +1,85 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+\n+from accelerate.state import ComputeEnvironment\n+\n+from .cluster import get_cluster_input\n+from .config_args import cache_dir, default_yaml_config_file\n+from .config_utils import _ask_field, _convert_compute_environment\n+from .sagemaker import get_sagemaker_input\n+\n+\n+def get_user_input():\n+ compute_environment = _ask_field(", "from_author": true }, { "body": "create a separate file to merge all configuration `dataclasses`. We can also think about moving them so `state.py`", "diff_hunk": "@@ -0,0 +1,119 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+", "from_author": true }, { "body": "general loading function for determining if config file is `json` or `yaml` and which configuration is used for loading. Open for suggestions on how to identify the `config_class` easier. ", "diff_hunk": "@@ -0,0 +1,119 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import json\n+import os\n+from dataclasses import dataclass\n+from enum import Enum\n+from typing import Optional, Union\n+\n+import yaml\n+from accelerate.state import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+\n+\n+hf_cache_home = os.path.expanduser(\n+ os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n+)\n+cache_dir = os.path.join(hf_cache_home, \"accelerate\")\n+default_json_config_file = os.path.join(cache_dir, \"default_config.json\")\n+default_yaml_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n+\n+# For backward compatibility: the default config is the json one if it's the only existing file.\n+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):\n+ default_config_file = default_yaml_config_file\n+else:\n+ default_config_file = default_json_config_file\n+\n+\n+def load_config_from_file(config_file):", "from_author": true }, { "body": "base class contains overlapping properties and methods. ", "diff_hunk": "@@ -0,0 +1,119 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import json\n+import os\n+from dataclasses import dataclass\n+from enum import Enum\n+from typing import Optional, Union\n+\n+import yaml\n+from accelerate.state import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+\n+\n+hf_cache_home = os.path.expanduser(\n+ os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n+)\n+cache_dir = os.path.join(hf_cache_home, \"accelerate\")\n+default_json_config_file = os.path.join(cache_dir, \"default_config.json\")\n+default_yaml_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n+\n+# For backward compatibility: the default config is the json one if it's the only existing file.\n+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):\n+ default_config_file = default_yaml_config_file\n+else:\n+ default_config_file = default_json_config_file\n+\n+\n+def load_config_from_file(config_file):\n+ config_file = config_file if config_file is not None else default_config_file\n+ with open(config_file, \"r\", encoding=\"utf-8\") as f:\n+ if config_file.endswith(\".json\"):\n+ if json.load(f)[\"compute_environment\"] == ComputeEnvironment.CUSTOM_CLUSTER:\n+ config_class = ClusterConfig\n+ else:\n+ config_class = SageMakerConfig\n+ return config_class.from_json_file(json_file=config_file)\n+ else:\n+ if yaml.safe_load(f)[\"compute_environment\"] == ComputeEnvironment.CUSTOM_CLUSTER:\n+ config_class = ClusterConfig\n+ else:\n+ config_class = SageMakerConfig\n+ return config_class.from_yaml_file(yaml_file=config_file)\n+\n+\n+@dataclass\n+class BaseConfig:", "from_author": true }, { "body": "kept `num_machines` in specialist class due to `dataclass` -> `Dataclass fields without default value cannot appear after data fields with default values`. ", "diff_hunk": "@@ -0,0 +1,119 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import json\n+import os\n+from dataclasses import dataclass\n+from enum import Enum\n+from typing import Optional, Union\n+\n+import yaml\n+from accelerate.state import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+\n+\n+hf_cache_home = os.path.expanduser(\n+ os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n+)\n+cache_dir = os.path.join(hf_cache_home, \"accelerate\")\n+default_json_config_file = os.path.join(cache_dir, \"default_config.json\")\n+default_yaml_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n+\n+# For backward compatibility: the default config is the json one if it's the only existing file.\n+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):\n+ default_config_file = default_yaml_config_file\n+else:\n+ default_config_file = default_json_config_file\n+\n+\n+def load_config_from_file(config_file):\n+ config_file = config_file if config_file is not None else default_config_file\n+ with open(config_file, \"r\", encoding=\"utf-8\") as f:\n+ if config_file.endswith(\".json\"):\n+ if json.load(f)[\"compute_environment\"] == ComputeEnvironment.CUSTOM_CLUSTER:\n+ config_class = ClusterConfig\n+ else:\n+ config_class = SageMakerConfig\n+ return config_class.from_json_file(json_file=config_file)\n+ else:\n+ if yaml.safe_load(f)[\"compute_environment\"] == ComputeEnvironment.CUSTOM_CLUSTER:\n+ config_class = ClusterConfig\n+ else:\n+ config_class = SageMakerConfig\n+ return config_class.from_yaml_file(yaml_file=config_file)\n+\n+\n+@dataclass\n+class BaseConfig:\n+ compute_environment: ComputeEnvironment\n+ distributed_type: Union[DistributedType, SageMakerDistributedType]\n+ fp16: bool\n+\n+ def to_dict(self):\n+ result = self.__dict__\n+ # For serialization, it's best to convert Enums to strings (or their underlying value type).\n+ for key, value in result.items():\n+ if isinstance(value, Enum):\n+ result[key] = value.value\n+ return result\n+\n+ @classmethod\n+ def from_json_file(cls, json_file=None):\n+ json_file = default_json_config_file if json_file is None else json_file\n+ with open(json_file, \"r\", encoding=\"utf-8\") as f:\n+ return cls(**json.load(f))\n+\n+ def to_json_file(self, json_file):\n+ with open(json_file, \"w\", encoding=\"utf-8\") as f:\n+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n+ f.write(content)\n+\n+ @classmethod\n+ def from_yaml_file(cls, yaml_file=None):\n+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n+ with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n+ return cls(**yaml.safe_load(f))\n+\n+ def to_yaml_file(self, yaml_file):\n+ with open(yaml_file, \"w\", encoding=\"utf-8\") as f:\n+ yaml.safe_dump(self.to_dict(), f)\n+\n+ def __post_init__(self):\n+ if isinstance(self.distributed_type, str):\n+ self.compute_environment = ComputeEnvironment(self.compute_environment)\n+ if 
isinstance(self.distributed_type, str):\n+ self.distributed_type = DistributedType(self.distributed_type)\n+\n+\n+@dataclass\n+class ClusterConfig(BaseConfig):\n+ num_processes: int\n+ machine_rank: int = 0\n+ num_machines: int = 1", "from_author": true }, { "body": "open for better naming", "diff_hunk": "@@ -53,6 +53,38 @@ class DistributedType(str, Enum):\n TPU = \"TPU\"\n \n \n+class SageMakerDistributedType(str, Enum):\n+ \"\"\"\n+ Represents a type of distributed environment.\n+\n+ Values:\n+\n+ - **NO** -- Not a distributed environment, just a single process.\n+ - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.\n+ - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.\n+ NO = \"NO\"\n+ DATA_PARALLEL = \"DATA_PARALLEL\"\n+ MODEL_PARALLEL = \"MODEL_PARALLEL\"\n+\n+\n+class ComputeEnvironment(str, Enum):\n+ \"\"\"\n+ Represents a type of the compute environment.\n+\n+ Values:\n+\n+ - **CUSTOM_CLUSTER** -- private/custom cluster hardware.\n+ - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.\n+ CUSTOM_CLUSTER = \"CUSTOM_CLUSTER\"", "from_author": true }, { "body": "This is a bit weird so would remove one .config by adding `config_command_parser` in the intermediate init.", "diff_hunk": "@@ -16,7 +16,7 @@\n \n from argparse import ArgumentParser\n \n-from accelerate.commands.config import config_command_parser\n+from accelerate.commands.config.config import config_command_parser", "from_author": false }, { "body": "Custom Cluster does sound a bit too complicated for the base config.\r\n```suggestion\r\n \"In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): \",\r\n```", "diff_hunk": "@@ -0,0 +1,85 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import argparse\n+import os\n+\n+from accelerate.state import ComputeEnvironment\n+\n+from .cluster import get_cluster_input\n+from .config_args import cache_dir, default_yaml_config_file\n+from .config_utils import _ask_field, _convert_compute_environment\n+from .sagemaker import get_sagemaker_input\n+\n+\n+def get_user_input():\n+ compute_environment = _ask_field(\n+ \"In which compute environment are you running? ([0] Custom Cluster, [1] AWS (Amazon SageMaker)): \",", "from_author": false }, { "body": "```suggestion\r\n if isinstance(self.compute_environment, str):\r\n```", "diff_hunk": "@@ -0,0 +1,119 @@\n+#!/usr/bin/env python\n+\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import json\n+import os\n+from dataclasses import dataclass\n+from enum import Enum\n+from typing import Optional, Union\n+\n+import yaml\n+from accelerate.state import ComputeEnvironment, DistributedType, SageMakerDistributedType\n+\n+\n+hf_cache_home = os.path.expanduser(\n+ os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n+)\n+cache_dir = os.path.join(hf_cache_home, \"accelerate\")\n+default_json_config_file = os.path.join(cache_dir, \"default_config.json\")\n+default_yaml_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n+\n+# For backward compatibility: the default config is the json one if it's the only existing file.\n+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):\n+ default_config_file = default_yaml_config_file\n+else:\n+ default_config_file = default_json_config_file\n+\n+\n+def load_config_from_file(config_file):\n+ config_file = config_file if config_file is not None else default_config_file\n+ with open(config_file, \"r\", encoding=\"utf-8\") as f:\n+ if config_file.endswith(\".json\"):\n+ if json.load(f)[\"compute_environment\"] == ComputeEnvironment.CUSTOM_CLUSTER:\n+ config_class = ClusterConfig\n+ else:\n+ config_class = SageMakerConfig\n+ return config_class.from_json_file(json_file=config_file)\n+ else:\n+ if yaml.safe_load(f)[\"compute_environment\"] == ComputeEnvironment.CUSTOM_CLUSTER:\n+ config_class = ClusterConfig\n+ else:\n+ config_class = SageMakerConfig\n+ return config_class.from_yaml_file(yaml_file=config_file)\n+\n+\n+@dataclass\n+class BaseConfig:\n+ compute_environment: ComputeEnvironment\n+ distributed_type: Union[DistributedType, SageMakerDistributedType]\n+ fp16: bool\n+\n+ def to_dict(self):\n+ result = self.__dict__\n+ # For serialization, it's best to convert Enums to strings (or their underlying value type).\n+ for key, value in result.items():\n+ if isinstance(value, Enum):\n+ result[key] = value.value\n+ return result\n+\n+ @classmethod\n+ def from_json_file(cls, json_file=None):\n+ json_file = default_json_config_file if json_file is None else json_file\n+ with open(json_file, \"r\", encoding=\"utf-8\") as f:\n+ return cls(**json.load(f))\n+\n+ def to_json_file(self, json_file):\n+ with open(json_file, \"w\", encoding=\"utf-8\") as f:\n+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n+ f.write(content)\n+\n+ @classmethod\n+ def from_yaml_file(cls, yaml_file=None):\n+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n+ with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n+ return cls(**yaml.safe_load(f))\n+\n+ def to_yaml_file(self, yaml_file):\n+ with open(yaml_file, \"w\", encoding=\"utf-8\") as f:\n+ yaml.safe_dump(self.to_dict(), f)\n+\n+ def __post_init__(self):\n+ if isinstance(self.distributed_type, str):", "from_author": false }, { "body": "Would also put those two functions in the intermediate 
init `accelerate.commands.config.__init__.py`", "diff_hunk": "@@ -23,8 +23,8 @@\n from pathlib import Path\n from typing import Optional\n \n-from accelerate.commands.config import LaunchConfig, default_config_file\n-from accelerate.state import DistributedType\n+from accelerate.commands.config.config_args import default_config_file, load_config_from_file", "from_author": false }, { "body": "I think `LOCAL_MACHINE` is better.", "diff_hunk": "@@ -53,6 +53,38 @@ class DistributedType(str, Enum):\n TPU = \"TPU\"\n \n \n+class SageMakerDistributedType(str, Enum):\n+ \"\"\"\n+ Represents a type of distributed environment.\n+\n+ Values:\n+\n+ - **NO** -- Not a distributed environment, just a single process.\n+ - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.\n+ - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.\n+ NO = \"NO\"\n+ DATA_PARALLEL = \"DATA_PARALLEL\"\n+ MODEL_PARALLEL = \"MODEL_PARALLEL\"\n+\n+\n+class ComputeEnvironment(str, Enum):\n+ \"\"\"\n+ Represents a type of the compute environment.\n+\n+ Values:\n+\n+ - **CUSTOM_CLUSTER** -- private/custom cluster hardware.\n+ - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.\n+ \"\"\"\n+\n+ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.\n+ CUSTOM_CLUSTER = \"CUSTOM_CLUSTER\"", "from_author": false }, { "body": "Would define a public `is_boto3_available()` function instead of a private variable.", "diff_hunk": "@@ -25,6 +26,8 @@\n if is_tpu_available():\n import torch_xla.core.xla_model as xm\n \n+_has_boto3 = importlib.util.find_spec(\"boto3\") is not None\n+", "from_author": false }, { "body": "I think we can add an extra \"sagemaker\" with boto3 inside.", "diff_hunk": "@@ -17,8 +17,17 @@\n \n extras = {}\n extras[\"quality\"] = [\"black >= 20.8b1\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n-extras[\"docs\"] = [\"recommonmark\", \"sphinx==3.2.1\", \"sphinx-markdown-tables\", \"sphinx-rtd-theme==0.4.3\", \"sphinx-copybutton\"]\n-\n+extras[\"docs\"] = [\n+ \"recommonmark\",\n+ \"sphinx==3.2.1\",\n+ \"sphinx-markdown-tables\",\n+ \"sphinx-rtd-theme==0.4.3\",\n+ \"sphinx-copybutton\",\n+]\n+extras[\"test\"] = [\n+ \"pytest\",\n+ \"pytest-xdist\",\n+]\n setup(", "from_author": false } ], "context": [ { "body": "I can't reproduce why `quality` is failing. I created a complete new conda env with the same packages. 
", "from_author": true } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/17", "pr_id": 601085053 }, { "diff": "diff --git a/src/accelerate/commands/config.py b/src/accelerate/commands/config.py\nindex e6b5582a8..c32905932 100644\n--- a/src/accelerate/commands/config.py\n+++ b/src/accelerate/commands/config.py\n@@ -18,8 +18,10 @@\n import json\n import os\n from dataclasses import dataclass\n+from enum import Enum\n from typing import Optional\n \n+import yaml\n from accelerate.state import DistributedType\n \n \n@@ -27,7 +29,14 @@\n os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n )\n cache_dir = os.path.join(hf_cache_home, \"accelerate\")\n-default_config_file = os.path.join(cache_dir, \"default_config.json\")\n+default_json_config_file = os.path.join(cache_dir, \"default_config.json\")\n+default_yaml_config_file = os.path.join(cache_dir, \"default_config.yaml\")\n+\n+# For backward compatibility: the default config is the json one if it's the only existing file.\n+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):\n+ default_config_file = default_yaml_config_file\n+else:\n+ default_config_file = default_json_config_file\n \n \n @dataclass\n@@ -41,17 +50,39 @@ class LaunchConfig:\n main_process_port: Optional[int] = None\n main_training_function: str = \"main\"\n \n+ def __post_init__(self):\n+ if isinstance(self.distributed_type, str):\n+ self.distributed_type = DistributedType(self.distributed_type)\n+\n+ def to_dict(self):\n+ result = self.__dict__\n+ # For serialization, it's best to convert Enums to strings (or their underlying value type).\n+ for key, value in result.items():\n+ if isinstance(value, Enum):\n+ result[key] = value.value\n+ return result\n+\n @classmethod\n def from_json_file(cls, json_file=None):\n- json_file = default_config_file if json_file is None else json_file\n+ json_file = default_json_config_file if json_file is None else json_file\n with open(json_file, \"r\", encoding=\"utf-8\") as f:\n return cls(**json.load(f))\n \n def to_json_file(self, json_file):\n with open(json_file, \"w\", encoding=\"utf-8\") as f:\n- content = json.dumps(self.__dict__, indent=2, sort_keys=True) + \"\\n\"\n+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n f.write(content)\n \n+ @classmethod\n+ def from_yaml_file(cls, yaml_file=None):\n+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n+ with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n+ return cls(**yaml.safe_load(f))\n+\n+ def to_yaml_file(self, yaml_file):\n+ with open(yaml_file, \"w\", encoding=\"utf-8\") as f:\n+ yaml.safe_dump(self.to_dict(), f)\n+\n \n def config_command_parser(subparsers=None):\n if subparsers is not None:\n@@ -169,9 +200,12 @@ def config_command(args):\n else:\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n- config_file = default_config_file\n+ config_file = default_yaml_config_file\n \n- config.to_json_file(config_file)\n+ if config_file.endswith(\".json\"):\n+ config.to_json_file(config_file)\n+ else:\n+ config.to_yaml_file(config_file)\n \n \n def main():\n", "code_comments": [ { "body": "```suggestion\r\n yaml.safe_dump(self.to_dict(), f)\r\n```\r\n\r\n> safe_dump(data, stream=None) serializes the given Python object into the stream. If stream is None, it returns the produced stream. 
safe_dump produces only standard YAML tags and cannot represent an arbitrary Python object.", "diff_hunk": "@@ -41,17 +50,39 @@ class LaunchConfig:\n main_process_port: Optional[int] = None\n main_training_function: str = \"main\"\n \n+ def __post_init__(self):\n+ if isinstance(self.distributed_type, str):\n+ self.distributed_type = DistributedType(self.distributed_type)\n+\n+ def to_dict(self):\n+ result = self.__dict__\n+ # For serialization, it's best to convert Enums to strings (or their underlying value type).\n+ for key, value in result.items():\n+ if isinstance(value, Enum):\n+ result[key] = value.value\n+ return result\n+\n @classmethod\n def from_json_file(cls, json_file=None):\n- json_file = default_config_file if json_file is None else json_file\n+ json_file = default_json_config_file if json_file is None else json_file\n with open(json_file, \"r\", encoding=\"utf-8\") as f:\n return cls(**json.load(f))\n \n def to_json_file(self, json_file):\n with open(json_file, \"w\", encoding=\"utf-8\") as f:\n- content = json.dumps(self.__dict__, indent=2, sort_keys=True) + \"\\n\"\n+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n f.write(content)\n \n+ @classmethod\n+ def from_yaml_file(cls, yaml_file=None):\n+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n+ with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n+ return cls(**yaml.safe_load(f))\n+\n+ def to_yaml_file(self, yaml_file):\n+ with open(yaml_file, \"w\", encoding=\"utf-8\") as f:\n+ yaml.dump(self.to_dict(), f)", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/16", "pr_id": 599807671 }, { "diff": "diff --git a/docs/source/index.rst b/docs/source/index.rst\nindex 065329a1d..57933888d 100644\n--- a/docs/source/index.rst\n+++ b/docs/source/index.rst\n@@ -145,4 +145,5 @@ Supported integrations\n :caption: API reference\n \n accelerator\n+ kwargs\n internal\ndiff --git a/docs/source/kwargs.rst b/docs/source/kwargs.rst\nnew file mode 100644\nindex 000000000..61f42cd11\n--- /dev/null\n+++ b/docs/source/kwargs.rst\n@@ -0,0 +1,30 @@\n+.. \n+ Copyright 2021 The HuggingFace Team. All rights reserved.\n+\n+ Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n+ the License. You may obtain a copy of the License at\n+\n+ http://www.apache.org/licenses/LICENSE-2.0\n+\n+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n+ an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n+ specific language governing permissions and limitations under the License.\n+\n+\n+Kwargs Handlers\n+=======================================================================================================================\n+\n+The following objects can be passed to the main :class:`~accelerate.Accelerator` to customize how some PyTorch objects\n+related to distributed training or mixed precision are created.\n+\n+\n+DistributedDataParallelKwargs\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+.. autoclass:: accelerate.DistributedDataParallelKwargs\n+\n+\n+GradScalerKwargs\n+-----------------------------------------------------------------------------------------------------------------------\n+\n+.. 
autoclass:: accelerate.GradScalerKwargs\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nindex 862fbce15..f4164613f 100644\n--- a/examples/cv_example.py\n+++ b/examples/cv_example.py\n@@ -22,7 +22,6 @@\n from torch.utils.data import DataLoader, Dataset\n \n import PIL\n-import torchvision\n from accelerate import Accelerator\n from timm import create_model\n from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 13333c4a4..aad2b8b65 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -14,6 +14,7 @@\n # limitations under the License.\n import argparse\n \n+import torch\n from torch.utils.data import DataLoader\n \n from accelerate import Accelerator, DistributedType\ndiff --git a/src/accelerate/__init__.py b/src/accelerate/__init__.py\nindex 1cc471903..da4f7402e 100644\n--- a/src/accelerate/__init__.py\n+++ b/src/accelerate/__init__.py\n@@ -5,5 +5,6 @@\n __version__ = \"0.1.0\"\n \n from .accelerator import Accelerator\n+from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs\n from .state import DistributedType\n from .utils import synchronize_rng_states\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 4707e0afe..d9f7cf1bc 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -19,6 +19,7 @@\n from packaging import version\n \n from .data_loader import prepare_data_loader\n+from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, KwargsHandler\n from .optimizer import AcceleratedOptimizer\n from .state import AcceleratorState, DistributedType\n from .utils import RNGType, extract_model_from_parallel, gather, save, wait_for_everyone\n@@ -56,6 +57,9 @@ class Accelerator:\n \n Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n >= 1.6.\n+ kwargs_handlers (list of kwargs handlers, `optional`)\n+ A list of :obj:`KwargHandler` to customize how the objects related to distributed training or mixed\n+ precision are created. 
See :doc:`kwargs` for more information.\n \n Attributes\n \n@@ -70,18 +74,37 @@ def __init__(\n fp16: bool = None,\n cpu: bool = False,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n+ kwargs_handlers: Optional[List[KwargsHandler]] = None,\n ):\n self.state = AcceleratorState(fp16=fp16, cpu=cpu, _from_accelerator=True)\n \n self.device_placement = device_placement\n self.split_batches = split_batches\n \n+ # Kwargs handlers\n+ self.ddp_handler = None\n+ self.scaler_handler = None\n+ if kwargs_handlers is not None:\n+ for handler in kwargs_handlers:\n+ assert isinstance(handler, KwargsHandler), f\"Unsupported kwargs handler passed: {handler}.\"\n+ if isinstance(handler, DistributedDataParallelKwargs):\n+ if self.ddp_handler is not None:\n+ raise ValueError(\"You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.\")\n+ else:\n+ self.ddp_handler = handler\n+ elif isinstance(handler, GradScalerKwargs):\n+ if self.scaler_handler is not None:\n+ raise ValueError(\"You can only pass one `GradScalerKwargs` in `kwargs_handler`.\")\n+ else:\n+ self.scaler_handler = handler\n+\n # Mixed precision attributes\n self.scaler = None\n self.native_amp = False\n if self.state.use_fp16:\n self.native_amp = version.parse(torch.__version__) >= version.parse(\"1.6\")\n- self.scaler = torch.cuda.amp.GradScaler()\n+ kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}\n+ self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n \n # Internal references to the training objects\n self._optimizers = []\n@@ -187,16 +210,18 @@ def prepare(self, *args):\n if isinstance(obj, torch.optim.Optimizer):\n obj._switch_parameters(mapping)\n \n- return result\n+ return result if len(result) > 1 else result[0]\n \n def prepare_model(self, model):\n if self.device_placement:\n model = model.to(self.device)\n if self.distributed_type == DistributedType.MULTI_GPU:\n+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.local_process_index],\n output_device=self.local_process_index,\n+ **kwargs,\n )\n if self.native_amp:\n model.forward = torch.cuda.amp.autocast()(model.forward)\ndiff --git a/src/accelerate/kwargs_handlers.py b/src/accelerate/kwargs_handlers.py\nnew file mode 100644\nindex 000000000..7f52d6639\n--- /dev/null\n+++ b/src/accelerate/kwargs_handlers.py\n@@ -0,0 +1,73 @@\n+# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import copy\n+from dataclasses import dataclass\n+\n+\n+class KwargsHandler:\n+ \"\"\"\n+ Internal mixin that implements a :obj:`to_kwargs()` method for a dataclass.\n+ \"\"\"\n+\n+ def to_dict(self):\n+ return copy.deepcopy(self.__dict__)\n+\n+ def to_kwargs(self):\n+ \"\"\"\n+ Returns a dictionary containing the attributes with values different from the default of this class.\n+ \"\"\"\n+ default_dict = self.__class__().to_dict()\n+ this_dict = self.to_dict()\n+ return {k: v for k, v in this_dict.items() if default_dict[k] != v}\n+\n+\n+@dataclass\n+class DistributedDataParallelKwargs(KwargsHandler):\n+ \"\"\"\n+ Use this object in your :class:`~accelerate.Accelerator` to customize how your model is wrapped in a\n+ :obj:`torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this `wrapper\n+ <https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html>`__ for more information\n+ on each argument.\n+\n+ .. warning::\n+\n+ :obj:`gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.\n+ \"\"\"\n+\n+ dim: int = 0\n+ broadcast_buffers: bool = True\n+ bucket_cap_mb: int = 25\n+ find_unused_parameters: bool = False\n+ check_reduction: bool = False\n+ gradient_as_bucket_view: bool = False\n+\n+\n+@dataclass\n+class GradScalerKwargs(KwargsHandler):\n+ \"\"\"\n+ Use this object in your :class:`~accelerate.Accelerator` to customize the behavior of mixed precision, specifically\n+ how the :obj:`torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this `scaler\n+ <https://pytorch.org/docs/stable/amp.html?highlight=gradscaler>`__ for more information on each argument.\n+\n+ .. warning::\n+\n+ :obj:`GradScaler` is only available in PyTorch 1.5.0 and later versions.\n+ \"\"\"\n+\n+ init_scale: float = 65536.0\n+ growth_factor: float = 2.0\n+ backoff_factor: float = 0.5\n+ growth_interval: int = 2000\n+ enabled: bool = True\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex a8b95d2d2..55d00ab5d 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -64,7 +64,7 @@ def param_groups(self):\n return self.optimizer.param_groups\n \n @param_groups.setter\n- def param_group(self, param_groups):\n+ def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n \n @property\n@@ -72,7 +72,7 @@ def defaults(self):\n return self.optimizer.defaults\n \n @defaults.setter\n- def param_group(self, defaults):\n+ def defaults(self, defaults):\n self.optimizer.defaults = defaults\n \n def add_param_group(self, param_group):\ndiff --git a/src/accelerate/test_utils/__init__.py b/src/accelerate/test_utils/__init__.py\nindex bb9f0f66e..03aef45b3 100644\n--- a/src/accelerate/test_utils/__init__.py\n+++ b/src/accelerate/test_utils/__init__.py\n@@ -2,5 +2,5 @@\n # There's no way to ignore \"F401 '...' 
imported but unused\" warnings in this\n # module, but to preserve other warnings. So, don't check this module at all.\n \n-from .testing import are_the_same_tensors, execute_subprocess_async, require_multi_gpu, require_tpu\n+from .testing import are_the_same_tensors, execute_subprocess_async, require_cuda, require_multi_gpu, require_tpu\n from .training import RegressionDataset, RegressionModel\ndiff --git a/src/accelerate/test_utils/testing.py b/src/accelerate/test_utils/testing.py\nindex d01b5276e..507f16eb8 100644\n--- a/src/accelerate/test_utils/testing.py\n+++ b/src/accelerate/test_utils/testing.py\n@@ -33,10 +33,19 @@ def are_the_same_tensors(tensor):\n return True\n \n \n+def require_cuda(test_case):\n+ \"\"\"\n+ Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available.\n+ \"\"\"\n+ if not torch.cuda.is_available():\n+ return unittest.skip(\"test requires a GPU\")(test_case)\n+ else:\n+ return test_case\n+\n+\n def require_tpu(test_case):\n \"\"\"\n Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.\n-\n \"\"\"\n if not is_tpu_available():\n return unittest.skip(\"test requires TPU\")(test_case)\ndiff --git a/tests/test_kwargs_handlers.py b/tests/test_kwargs_handlers.py\nnew file mode 100644\nindex 000000000..aaf15de0c\n--- /dev/null\n+++ b/tests/test_kwargs_handlers.py\n@@ -0,0 +1,96 @@\n+# Copyright 2021 The HuggingFace Team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import inspect\n+import os\n+import sys\n+import unittest\n+from dataclasses import dataclass\n+\n+import torch\n+\n+from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs\n+from accelerate.kwargs_handlers import KwargsHandler\n+from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu\n+\n+\n+@dataclass\n+class MockClass(KwargsHandler):\n+ a: int = 0\n+ b: bool = False\n+ c: float = 3.0\n+\n+\n+class DataLoaderTester(unittest.TestCase):\n+ def test_kwargs_handler(self):\n+ # If no defaults are changed, `to_kwargs` returns an empty dict.\n+ self.assertDictEqual(MockClass().to_kwargs(), {})\n+ self.assertDictEqual(MockClass(a=2).to_kwargs(), {\"a\": 2})\n+ self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {\"a\": 2, \"b\": True})\n+ self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {\"a\": 2, \"c\": 2.25})\n+\n+ @require_cuda\n+ def test_grad_scaler_kwargs(self):\n+ # If no defaults are changed, `to_kwargs` returns an empty dict.\n+ scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)\n+ accelerator = Accelerator(fp16=True, kwargs_handlers=[scaler_handler])\n+ print(accelerator.use_fp16)\n+ scaler = accelerator.scaler\n+\n+ # Check the kwargs have been applied\n+ self.assertEqual(scaler._init_scale, 1024.0)\n+ self.assertEqual(scaler._growth_factor, 2.0)\n+\n+ # Check the other values are at the default\n+ self.assertEqual(scaler._backoff_factor, 0.5)\n+ 
self.assertEqual(scaler._growth_interval, 2000)\n+ self.assertEqual(scaler._enabled, True)\n+\n+ @require_multi_gpu\n+ def test_ddp_kwargs(self):\n+ distributed_args = f\"\"\"\n+ -m torch.distributed.launch\n+ --nproc_per_node={torch.cuda.device_count()}\n+ --use_env\n+ {inspect.getfile(self.__class__)}\n+ \"\"\".split()\n+ cmd = [sys.executable] + distributed_args\n+ execute_subprocess_async(cmd, env=os.environ.copy())\n+\n+\n+if __name__ == \"__main__\":\n+ ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)\n+ accelerator = Accelerator(kwargs_handlers=[ddp_scaler])\n+ model = torch.nn.Linear(100, 200)\n+ model = accelerator.prepare(model)\n+\n+ # Check the values changed in kwargs\n+ error_msg = \"\"\n+ observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)\n+ if observed_bucket_cap_map != 15:\n+ error_msg += f\"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\\n\"\n+ if model.find_unused_parameters is not True:\n+ error_msg += f\"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\\n\"\n+\n+ # Check the values of the defaults\n+ if model.dim != 0:\n+ error_msg += f\"Default value not respected, should have `0` but found {model.dim}.\\n\"\n+ if model.broadcast_buffers is not True:\n+ error_msg += f\"Default value not respected, should have `True` but found {model.broadcast_buffers}.\\n\"\n+ if model.gradient_as_bucket_view is not False:\n+ error_msg += f\"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\\n\"\n+\n+ # Raise error at the end to make sure we don't stop at the first failure.\n+ if len(error_msg) > 0:\n+ raise ValueError(error_msg)\n", "code_comments": [ { "body": "Unrelated to this PR, this is to make quality pass.", "diff_hunk": "@@ -22,7 +22,6 @@\n from torch.utils.data import DataLoader, Dataset\n \n import PIL\n-import torchvision", "from_author": true }, { "body": "Unrelated to this PR, this is to make quality pass.", "diff_hunk": "@@ -14,6 +14,7 @@\n # limitations under the License.\n import argparse\n \n+import torch", "from_author": true }, { "body": "Unrelated to this PR, this is to make quality pass.", "diff_hunk": "@@ -64,15 +64,15 @@ def param_groups(self):\n return self.optimizer.param_groups\n \n @param_groups.setter\n- def param_group(self, param_groups):\n+ def param_groups(self, param_groups):", "from_author": true }, { "body": "Unrelated to this PR, this is to make quality pass.", "diff_hunk": "@@ -64,15 +64,15 @@ def param_groups(self):\n return self.optimizer.param_groups\n \n @param_groups.setter\n- def param_group(self, param_groups):\n+ def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n \n @property\n def defaults(self):\n return self.optimizer.defaults\n \n @defaults.setter\n- def param_group(self, defaults):\n+ def defaults(self, defaults):", "from_author": true } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/15", "pr_id": 598136435 }, { "diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 54ef3c12f..61a39fc95 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -105,10 +105,7 @@ def send_to_device(tensor, device):\n elif isinstance(tensor, dict):\n return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})\n elif not hasattr(tensor, \"to\"):\n- raise TypeError(\n- f\"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts \"\n- 
\"of tensors or objects having a `to` method.\"\n- )\n+ return tensor\n return tensor.to(device)\n \n \n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/13", "pr_id": 595121308 }, { "diff": "diff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex 0a86f41b9..1e3ac4e89 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -207,7 +207,8 @@ lof of time. In practice, that means you must take special care to have all your\n shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layer with for loops that\n have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.\n \n-To introduce special behavior in your script for TPUs you can check the :obj:`distributed_type` of your :obj:`accelerator`:\n+To introduce special behavior in your script for TPUs you can check the :obj:`distributed_type` of your\n+:obj:`accelerator`:\n \n .. code-block:: python\n \n@@ -356,9 +357,10 @@ setting the same seed in the main random number generator in all processes.\n \n .. Warning::\n \n- Synchronization the main torch (or CUDA or XLA) random number generator will affect any other potential random artifacts you could have in your\n- dataset (like random data augmentation) in the sense all processes will get the same random numbers from the torch\n- random modules (so will apply the same random data augmentation if it's controlled by torch).\n+ Synchronization the main torch (or CUDA or XLA) random number generator will affect any other potential random\n+ artifacts you could have in your dataset (like random data augmentation) in the sense all processes will get the\n+ same random numbers from the torch random modules (so will apply the same random data augmentation if it's\n+ controlled by torch).\n \n .. 
Note::\n \ndiff --git a/examples/README.md b/examples/README.md\nindex 30b376cef..be636e63c 100644\n--- a/examples/README.md\n+++ b/examples/README.md\n@@ -18,7 +18,13 @@ limitations under the License.\n \n ## Simple NLP example\n \n-The [nlp_example.py](./nlp_example.py) script is a simple example to train a Bert model on a classification task ([GLUE's MRPC]()).\n+The [nlp_example.py](./nlp_example.py) script is a simple example to train a Bert model on a classification task ([GLUE's MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398)).\n+\n+Prior to running it you should install πŸ€— Dataset and πŸ€— Transformers:\n+\n+```bash\n+pip install datasets, transformers\n+```\n \n The same script can be run in any of the following configurations:\n - single CPU or single GPU\n@@ -89,3 +95,91 @@ To run it in each of these various modes, use the following commands:\n ```\n * In PyTorch:\n Add an `xmp.spawn` line in your script as you usually do.\n+\n+\n+## Simple vision example\n+\n+The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a ResNet-50 on a classification task ([Ofxord-IIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/)).\n+\n+The same script can be run in any of the following configurations:\n+- single CPU or single GPU\n+- multi GPUS (using PyTorch distributed mode)\n+- (multi) TPUs\n+- fp16 (mixed-precision) or fp32 (normal precision)\n+\n+Prior to running it you should install timm and torchvision:\n+\n+```bash\n+pip install timm, torchvision\n+```\n+\n+and you should download the data with the following commands:\n+\n+```bash\n+wget https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz\n+tar -xzf images.tar.gz\n+```\n+\n+To run it in each of these various modes, use the following commands:\n+- single CPU:\n+ * from a server without GPU\n+ ```bash\n+ python ./cv_example.py --data_dir path_to_data\n+ ```\n+ * from any server by passing `cpu=True` to the `Accelerator`.\n+ ```bash\n+ python ./cv_example.py --data_dir path_to_data --cpu\n+ ```\n+ * from any server with Accelerate launcher\n+ ```bash\n+ accelerate launch --cpu ./cv_example.py --data_dir path_to_data\n+ ```\n+- single GPU:\n+ ```bash\n+ python ./nlp_example.py # from a server with a GPU\n+ ```\n+- with fp16 (mixed-precision)\n+ * from any server by passing `fp16=True` to the `Accelerator`.\n+ ```bash\n+ python ./cv_example.py --data_dir path_to_data --fp16\n+ ```\n+ * from any server with Accelerate launcher\n+ ```bash\n+ accelerate launch --fb16 ./cv_example.py --data_dir path_to_data\n+- multi GPUS (using PyTorch distributed mode)\n+ * With Accelerate config and launcher\n+ ```bash\n+ accelerate config # This will create a config file on your server\n+ accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on your server\n+ ```\n+ * With traditional PyTorch launcher\n+ ```bash\n+ python -m torch.distributed.launch --nproc_per_node 2 --use_env ./cv_example.py --data_dir path_to_data\n+ ```\n+- multi GPUs, multi node (several machines, using PyTorch distributed mode)\n+ * With Accelerate config and launcher, on each machine:\n+ ```bash\n+ accelerate config # This will create a config file on each server\n+ accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server\n+ ```\n+ * With PyTorch launcher only\n+ ```bash\n+ python -m torch.distributed.launch --nproc_per_node 2 \\\n+ --use_env \\\n+ --node_rank 0 \\\n+ --master_addr master_node_ip_address \\\n+ ./cv_example.py --data_dir path_to_data 
# On the first server\n+ python -m torch.distributed.launch --nproc_per_node 2 \\\n+ --use_env \\\n+ --node_rank 1 \\\n+ --master_addr master_node_ip_address \\\n+ ./cv_example.py --data_dir path_to_data # On the second server\n+ ```\n+- (multi) TPUs\n+ * With Accelerate config and launcher\n+ ```bash\n+ accelerate config # This will create a config file on your TPU server\n+ accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server\n+ ```\n+ * In PyTorch:\n+ Add an `xmp.spawn` line in your script as you usually do.\ndiff --git a/examples/cv_example.py b/examples/cv_example.py\nnew file mode 100644\nindex 000000000..862fbce15\n--- /dev/null\n+++ b/examples/cv_example.py\n@@ -0,0 +1,198 @@\n+# coding=utf-8\n+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+import argparse\n+import os\n+import re\n+\n+import numpy as np\n+import torch\n+from torch.optim.lr_scheduler import OneCycleLR\n+from torch.utils.data import DataLoader, Dataset\n+\n+import PIL\n+import torchvision\n+from accelerate import Accelerator\n+from timm import create_model\n+from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor\n+\n+\n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example trains a ResNet50 on the Oxford-IIT Pet Dataset\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/tree/main/examples\n+#\n+########################################################################\n+\n+\n+# Function to get the label from the filename\n+def extract_label(fname):\n+ stem = fname.split(os.path.sep)[-1]\n+ return re.search(r\"^(.*)_\\d+\\.jpg$\", stem).groups()[0]\n+\n+\n+class PetsDataset(Dataset):\n+ def __init__(self, file_names, image_transform=None, label_to_id=None):\n+ self.file_names = file_names\n+ self.image_transform = image_transform\n+ self.label_to_id = label_to_id\n+\n+ def __len__(self):\n+ return len(self.file_names)\n+\n+ def __getitem__(self, idx):\n+ fname = self.file_names[idx]\n+ raw_image = PIL.Image.open(fname)\n+ image = raw_image.convert(\"RGB\")\n+ if self.image_transform is not None:\n+ image = self.image_transform(image)\n+ label = extract_label(fname)\n+ if self.label_to_id is not None:\n+ label = self.label_to_id[label]\n+ return {\"image\": image, \"label\": label}\n+\n+\n+def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu)\n+\n+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n+ lr = config[\"lr\"]\n+ num_epochs = 
int(config[\"num_epochs\"])\n+ seed = int(config[\"seed\"])\n+ batch_size = int(config[\"batch_size\"])\n+ image_size = config[\"image_size\"]\n+ if not isinstance(image_size, (list, tuple)):\n+ image_size = (image_size, image_size)\n+\n+ # Grab all the image filenames\n+ file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(\".jpg\")]\n+\n+ # Build the label correspondences\n+ all_labels = [extract_label(fname) for fname in file_names]\n+ id_to_label = list(set(all_labels))\n+ id_to_label.sort()\n+ label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}\n+\n+ # Set the seed before splitting the data.\n+ np.random.seed(seed)\n+ torch.manual_seed(seed)\n+ torch.cuda.manual_seed_all(seed)\n+\n+ # Split our filenames between train and validation\n+ random_perm = np.random.permutation(len(file_names))\n+ cut = int(0.8 * len(file_names))\n+ train_split = random_perm[:cut]\n+ eval_split = random_perm[cut:]\n+\n+ # For training we use a simple RandomResizedCrop\n+ train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])\n+ train_dataset = PetsDataset(\n+ [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id\n+ )\n+\n+ # For evaluation, we use a deterministic Resize\n+ eval_tfm = Compose([Resize(image_size), ToTensor()])\n+ eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)\n+\n+ # Instantiate dataloaders.\n+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)\n+ eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)\n+\n+ # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n+ model = create_model(\"resnet50d\", pretrained=True, num_classes=len(label_to_id))\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+ model = model.to(accelerator.device)\n+\n+ # Freezing the base model\n+ for param in model.parameters():\n+ param.requires_grad = False\n+ for param in model.get_classifier().parameters():\n+ param.requires_grad = True\n+\n+ # We normalize the batches of images to be a bit faster.\n+ mean = torch.tensor(model.default_cfg[\"mean\"])[None, :, None, None].to(accelerator.device)\n+ std = torch.tensor(model.default_cfg[\"std\"])[None, :, None, None].to(accelerator.device)\n+\n+ # Instantiate optimizer\n+ optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)\n+\n+ # Prepare everything\n+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n+ # prepare method.\n+ model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n+ model, optimizer, train_dataloader, eval_dataloader\n+ )\n+\n+ # Instantiate learning rate scheduler after preparing the training dataloader as the prepare method\n+ # may change its length.\n+ lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))\n+\n+ # Now we train the model\n+ for epoch in range(num_epochs):\n+ model.train()\n+ for step, batch in enumerate(train_dataloader):\n+ # We could avoid this line since we 
set the accelerator with `device_placement=True`.\n+ batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n+ inputs = (batch[\"image\"] - mean) / std\n+ outputs = model(inputs)\n+ loss = torch.nn.functional.cross_entropy(outputs, batch[\"label\"])\n+ accelerator.backward(loss)\n+ optimizer.step()\n+ lr_scheduler.step()\n+ optimizer.zero_grad()\n+\n+ model.eval()\n+ accurate = 0\n+ num_elems = 0\n+ for step, batch in enumerate(eval_dataloader):\n+ # We could avoid this line since we set the accelerator with `device_placement=True`.\n+ batch = {k: v.to(accelerator.device) for k, v in batch.items()}\n+ inputs = (batch[\"image\"] - mean) / std\n+ with torch.no_grad():\n+ outputs = model(inputs)\n+ predictions = outputs.argmax(dim=-1)\n+ accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch[\"label\"])\n+ num_elems += accurate_preds.shape[0]\n+ accurate += accurate_preds.long().sum()\n+\n+ eval_metric = accurate.item() / num_elems\n+ # Use accelerator.print to print only on the main process.\n+ accelerator.print(f\"epoch {epoch}: {100 * eval_metric:.2f}\")\n+\n+\n+def main():\n+ parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\"--data_dir\", required=True, help=\"The data folder on disk.\")\n+ parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n+ args = parser.parse_args()\n+ config = {\"lr\": 3e-2, \"num_epochs\": 3, \"seed\": 42, \"batch_size\": 64, \"image_size\": 224}\n+ training_function(config, args)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 859309a7a..13333c4a4 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -30,7 +30,7 @@\n ########################################################################\n # This is a fully working simple example to use Accelerate\n #\n-# This example train a Bert base model on GLUE MRPC\n+# This example trains a Bert base model on GLUE MRPC\n # in any of the following settings (with the same script):\n # - single CPU or single GPU\n # - multi GPUS (using PyTorch distributed mode)\n@@ -127,7 +127,7 @@ def collate_fn(examples):\n num_training_steps=len(train_dataloader) * num_epochs,\n )\n \n- # Now we train the model - We prune bad trials after each epoch if needed\n+ # Now we train the model\n for epoch in range(num_epochs):\n model.train()\n for step, batch in enumerate(train_dataloader):\n@@ -146,7 +146,8 @@ def collate_fn(examples):\n for step, batch in enumerate(eval_dataloader):\n # We could avoid this line since we set the accelerator with `device_placement=True`.\n batch.to(accelerator.device)\n- outputs = model(**batch)\n+ with torch.no_grad():\n+ outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n metric.add_batch(\n predictions=accelerator.gather(predictions),\n@@ -160,16 +161,8 @@ def collate_fn(examples):\n \n def main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n- parser.add_argument(\n- \"--fp16\",\n- action=\"store_true\",\n- help=\"If passed, will use FP16 training.\",\n- )\n- parser.add_argument(\n- \"--cpu\",\n- action=\"store_true\",\n- help=\"If passed, will train on the CPU.\",\n- )\n+ parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n+ parser.add_argument(\"--cpu\", 
action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n training_function(config, args)\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 4bb404c39..4707e0afe 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -51,9 +51,9 @@ class Accelerator:\n - :obj:`\"torch\"`: the base torch random number generator\n - :obj:`\"cuda\"`: the CUDA random number generator (GPU only)\n - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n- - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler\n- in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n- \n+ - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler in\n+ your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n+\n Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n >= 1.6.\n \ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex fa364ef6e..061c289ea 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -27,10 +27,10 @@\n from accelerate.state import DistributedType\n \n \n-class _AddOneArg():\n+class _AddOneArg:\n def __init__(self, launcher):\n self.launcher = launcher\n- \n+\n def __call__(self, index):\n self.launcher()\n \ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 937e4598d..24a7ad08a 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -339,8 +339,8 @@ def prepare_data_loader(\n - :obj:`\"torch\"`: the base torch random number generator\n - :obj:`\"cuda\"`: the CUDA random number generator (GPU only)\n - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n- - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler\n- in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n+ - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler in\n+ your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n \n Returns:\n :obj:`torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\ndiff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\nindex 3a6046325..a8b95d2d2 100644\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -63,6 +63,18 @@ def __init__(self, optimizer, device_placement=True, scaler=None):\n def param_groups(self):\n return self.optimizer.param_groups\n \n+ @param_groups.setter\n+ def param_group(self, param_groups):\n+ self.optimizer.param_groups = param_groups\n+\n+ @property\n+ def defaults(self):\n+ return self.optimizer.defaults\n+\n+ @defaults.setter\n+ def param_group(self, defaults):\n+ self.optimizer.defaults = defaults\n+\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n \ndiff --git a/src/accelerate/test_utils/test_script.py b/src/accelerate/test_utils/test_script.py\nindex 62543a17b..2e5095533 100644\n--- a/src/accelerate/test_utils/test_script.py\n+++ 
b/src/accelerate/test_utils/test_script.py\n@@ -14,8 +14,6 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from packaging import version\n-\n import torch\n from torch.utils.data import DataLoader\n \n@@ -24,6 +22,7 @@\n from accelerate.state import AcceleratorState, DistributedType\n from accelerate.test_utils import RegressionDataset, RegressionModel, are_the_same_tensors\n from accelerate.utils import gather, set_seed, synchronize_rng_states\n+from packaging import version\n \n \n def init_state_check():\n@@ -201,6 +200,7 @@ def training_check():\n assert torch.allclose(old_model.a, model.a)\n assert torch.allclose(old_model.b, model.b)\n \n+\n def main():\n accelerator = Accelerator()\n state = accelerator.state\n@@ -220,5 +220,6 @@ def main():\n print(\"\\n**Training integration test**\")\n training_check()\n \n+\n if __name__ == \"__main__\":\n main()\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 4a078b475..54ef3c12f 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -61,7 +61,7 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona\n elif rng_type == RNGType.GENERATOR:\n assert generator is not None, \"Need a generator to synchronize its seed.\"\n rng_state = generator.get_state()\n- \n+\n # Broadcast the rng state from device 0 to other devices\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n", "code_comments": [ { "body": "PyTorch optimizers are expected to have thos attributes setable, so I'm obliging.", "diff_hunk": "@@ -63,6 +63,18 @@ def __init__(self, optimizer, device_placement=True, scaler=None):\n def param_groups(self):\n return self.optimizer.param_groups\n \n+ @param_groups.setter\n+ def param_group(self, param_groups):\n+ self.optimizer.param_groups = param_groups\n+\n+ @property\n+ def defaults(self):\n+ return self.optimizer.defaults\n+\n+ @defaults.setter\n+ def param_group(self, defaults):\n+ self.optimizer.defaults = defaults", "from_author": true } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/10", "pr_id": 591809725 }, { "diff": "diff --git a/README.md b/README.md\nindex 0b28b6a50..219b93248 100644\n--- a/README.md\n+++ b/README.md\n@@ -214,7 +214,7 @@ for epoch in range(10):\n \n + from accelerate import Accelerator\n + accelerator = Accelerator()\n-+ device = accelerator.device\n+-\n \n + model = torch.nn.Transformer()\n optim = torch.optim.Adam(\n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/9", "pr_id": 588693991 }, { "diff": "diff --git a/docs/source/internal.rst b/docs/source/internal.rst\nindex 4c6d895b9..2d8f0f453 100644\n--- a/docs/source/internal.rst\n+++ b/docs/source/internal.rst\n@@ -78,6 +78,8 @@ Utilities\n \n .. autofunction:: accelerate.utils.set_seed\n \n+.. autofunction:: accelerate.utils.synchronize_rng_state\n+\n .. autofunction:: accelerate.utils.synchronize_rng_states\n \n .. 
autofunction:: accelerate.utils.wait_for_everyone\ndiff --git a/docs/source/quicktour.rst b/docs/source/quicktour.rst\nindex f74ca9eea..0a86f41b9 100644\n--- a/docs/source/quicktour.rst\n+++ b/docs/source/quicktour.rst\n@@ -340,20 +340,29 @@ library handles the sharding of your data between processes by changing that :ob\n \n The :class:`~accelerate.data_loader.DataLoaderShard` subclasses :obj:`DataLoader` to add the following functionality:\n \n-- it synchronizes the torch random number generators of all processes at each new iteration, to ensure any\n+- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any\n randomization (like shuffling) is done the exact same way across processes.\n - it puts the batches on the proper device before yielding them (unless you have opted out of\n :obj:`device_placement=True`).\n \n+The random number generator synchronization will by default synchronize:\n+\n+- the :obj:`generator` attribute of a given sampler (like the PyTorch :obj:`RandomSampler`) for PyTorch >= 1.6\n+- the main random number generator in PyTorch <=1.5.1\n+\n+You can choose which random number generator(s) to synchronize with the :obj:`rng_types` argument of the main\n+:class:`~accelerate.Accelerator`. In PyTorch >= 1.6, it is recommended to rely on local :obj:`generator`s to avoid\n+setting the same seed in the main random number generator in all processes.\n+\n .. Warning::\n \n- The random number generator synchronization will affect any other potential random artifacts you could have in your\n+ Synchronization the main torch (or CUDA or XLA) random number generator will affect any other potential random artifacts you could have in your\n dataset (like random data augmentation) in the sense all processes will get the same random numbers from the torch\n- random modules (so will apply the same random data augmentation if it's controlled by torch). While this is usually\n- fine, you should use the random number generator from the Python :obj:`random` module or NumPy for your data\n- augmentation if you think this will be a problem.\n+ random modules (so will apply the same random data augmentation if it's controlled by torch).\n+\n+.. 
Note::\n \n- The randomization part of your sampler on the other hand should absolutely be done using the torch random number\n- generator (like in the traditional :obj:`RandomSampler`).\n+ The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local\n+ :obj:`torch.Generator` object (in PyTorch >= 1.6), see the traditional :obj:`RandomSampler`, as an example.\n \n See more details about the internal in the :doc:`Internals page <internal>`.\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex a2f407ba4..4bb404c39 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -12,6 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from typing import List, Optional, Union\n+\n import torch\n \n from packaging import version\n@@ -19,7 +21,7 @@\n from .data_loader import prepare_data_loader\n from .optimizer import AcceleratedOptimizer\n from .state import AcceleratorState, DistributedType\n-from .utils import extract_model_from_parallel, gather, save, wait_for_everyone\n+from .utils import RNGType, extract_model_from_parallel, gather, save, wait_for_everyone\n \n \n class Accelerator:\n@@ -42,6 +44,18 @@ class Accelerator:\n cpu (:obj:`bool`, `optional`):\n Whether or not to force the script to execute on CPU. Will ignore GPU available if set to :obj:`True` and\n force the execution on one process only.\n+ rng_types (list of :obj:`str` or :class:`~accelerate.utils.RNGType`):\n+ The list of random number generators to synchronize at the beginning of each iteration in your prepared\n+ dataloaders. Should be one or several of:\n+\n+ - :obj:`\"torch\"`: the base torch random number generator\n+ - :obj:`\"cuda\"`: the CUDA random number generator (GPU only)\n+ - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n+ - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler\n+ in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n+ \n+ Will default to :obj:`[\"torch\"]` for PyTorch versions <=1.5.1 and :obj:`[\"generator\"]` for PyTorch versions\n+ >= 1.6.\n \n Attributes\n \n@@ -50,7 +64,12 @@ class Accelerator:\n \"\"\"\n \n def __init__(\n- self, device_placement: bool = True, split_batches: bool = False, fp16: bool = None, cpu: bool = False\n+ self,\n+ device_placement: bool = True,\n+ split_batches: bool = False,\n+ fp16: bool = None,\n+ cpu: bool = False,\n+ rng_types: Optional[List[Union[str, RNGType]]] = None,\n ):\n self.state = AcceleratorState(fp16=fp16, cpu=cpu, _from_accelerator=True)\n \n@@ -67,6 +86,10 @@ def __init__(\n # Internal references to the training objects\n self._optimizers = []\n \n+ # RNG Types\n+ if rng_types is None:\n+ self.rng_types = [\"torch\"] if version.parse(torch.__version__) <= version.parse(\"1.5.1\") else [\"generator\"]\n+\n @property\n def distributed_type(self):\n return self.state.distributed_type\n@@ -187,6 +210,7 @@ def prepare_data_loader(self, data_loader):\n process_index=self.process_index,\n split_batches=self.split_batches,\n put_on_device=self.device_placement,\n+ rng_types=self.rng_types,\n )\n \n def prepare_optimizer(self, optimizer):\ndiff --git a/src/accelerate/data_loader.py b/src/accelerate/data_loader.py\nindex 92bfc3c99..937e4598d 100644\n--- a/src/accelerate/data_loader.py\n+++ b/src/accelerate/data_loader.py\n@@ -12,7 +12,7 @@\n # See the License for the 
specific language governing permissions and\n # limitations under the License.\n \n-from typing import Optional\n+from typing import List, Optional, Union\n \n import torch\n from torch.utils.data import BatchSampler, DataLoader, IterableDataset\n@@ -20,7 +20,7 @@\n from packaging import version\n \n from .state import AcceleratorState, DistributedType, is_tpu_available\n-from .utils import send_to_device, synchronize_rng_states\n+from .utils import RNGType, send_to_device, synchronize_rng_states\n \n \n if is_tpu_available():\n@@ -262,16 +262,29 @@ class DataLoaderShard(DataLoader):\n The dataset to use to build this datalaoder.\n device (:obj:`torch.device`, `optional`):\n If passed, the device to put all batches on.\n+ rng_types (list of :obj:`str` or :class:`~accelerate.utils.RNGType`):\n+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or\n+ several of:\n+\n+ - :obj:`\"torch\"`: the base torch random number generator\n+ - :obj:`\"cuda\"`: the CUDA random number generator (GPU only)\n+ - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n+ - :obj:`\"generator\"`: an optional :obj:`torch.Generator`\n+ generator (:obj:`torch.Generator`, `optional`):\n+ A random number generator to keep synchronized accross processes.\n kwargs:\n All other keyword arguments to pass to the regular :obj:`DataLoader` initialization.\n \"\"\"\n \n- def __init__(self, dataset, device=None, **kwargs):\n+ def __init__(self, dataset, device=None, rng_types=None, generator=None, **kwargs):\n super().__init__(dataset, **kwargs)\n self.device = device\n+ self.rng_types = rng_types\n+ self.generator = generator\n \n def __iter__(self):\n- synchronize_rng_states()\n+ if self.rng_types is not None:\n+ synchronize_rng_states(self.rng_types, self.generator)\n state = AcceleratorState()\n for batch in super().__iter__():\n if state.distributed_type == DistributedType.TPU:\n@@ -286,6 +299,7 @@ def prepare_data_loader(\n process_index: Optional[int] = None,\n split_batches: bool = False,\n put_on_device: bool = False,\n+ rng_types: Optional[List[Union[str, RNGType]]] = None,\n ) -> DataLoader:\n \"\"\"\n Wraps a PyTorch :obj:`DataLoader` to generate batches for one of the processes only.\n@@ -318,6 +332,15 @@ def prepare_data_loader(\n put_on_device (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to put the batches on :obj:`device` (only works if the batches are nested list, tuples or\n dictionaries of tensors).\n+ rng_types (list of :obj:`str` or :class:`~accelerate.utils.RNGType`):\n+ The list of random number generators to synchronize at the beginning of each iteration. 
Should be one or\n+ several of:\n+\n+ - :obj:`\"torch\"`: the base torch random number generator\n+ - :obj:`\"cuda\"`: the CUDA random number generator (GPU only)\n+ - :obj:`\"xla\"`: the XLA random number generator (TPU only)\n+ - :obj:`\"generator\"`: the :obj:`torch.Generator` of the sampler (or batch sampler if there is no sampler\n+ in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n \n Returns:\n :obj:`torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n@@ -342,9 +365,12 @@ def prepare_data_loader(\n \n new_dataset = dataloader.dataset\n new_batch_sampler = dataloader.batch_sampler\n+ generator = getattr(dataloader, \"generator\", None)\n # No change if no multiprocess\n if num_processes != 1:\n if isinstance(new_dataset, IterableDataset):\n+ if getattr(dataloader.dataset, \"generator\", None) is not None:\n+ generator = dataloader.dataset.generator\n new_dataset = IterableDatasetShard(\n new_dataset,\n batch_size=dataloader.batch_size,\n@@ -355,6 +381,13 @@ def prepare_data_loader(\n )\n else:\n # New batch sampler for the current process.\n+ if hasattr(dataloader.sampler, \"generator\"):\n+ if dataloader.sampler.generator is None:\n+ dataloader.sampler.generator = torch.Generator()\n+ generator = dataloader.sampler.generator\n+ generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\n+ elif getattr(dataloader.batch_sampler, \"generator\", None) is not None:\n+ generator = dataloader.batch_sampler.generator\n new_batch_sampler = BatchSamplerShard(\n dataloader.batch_sampler,\n num_processes=num_processes,\n@@ -369,8 +402,12 @@ def prepare_data_loader(\n \"sampler\",\n \"batch_sampler\",\n \"drop_last\",\n+ \"generator\",\n ]\n \n+ if rng_types is not None and generator is None and \"generator\" in rng_types:\n+ rng_types.remove(\"generator\")\n+\n kwargs = {\n k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])\n for k in _PYTORCH_DATALOADER_KWARGS\n@@ -380,5 +417,7 @@ def prepare_data_loader(\n new_dataset,\n device=device if put_on_device else None,\n batch_sampler=new_batch_sampler,\n+ rng_types=rng_types,\n+ generator=generator,\n **kwargs,\n )\ndiff --git a/src/accelerate/test_utils/test_script.py b/src/accelerate/test_utils/test_script.py\nindex e8282a319..62543a17b 100644\n--- a/src/accelerate/test_utils/test_script.py\n+++ b/src/accelerate/test_utils/test_script.py\n@@ -14,6 +14,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from packaging import version\n+\n import torch\n from torch.utils.data import DataLoader\n \n@@ -34,10 +36,16 @@ def init_state_check():\n \n def rng_sync_check():\n state = AcceleratorState()\n- synchronize_rng_states()\n+ synchronize_rng_states([\"torch\"])\n assert are_the_same_tensors(torch.get_rng_state())\n if state.distributed_type == DistributedType.MULTI_GPU:\n+ synchronize_rng_states([\"cuda\"])\n assert are_the_same_tensors(torch.cuda.get_rng_state())\n+ if version.parse(torch.__version__) >= version.parse(\"1.6.0\"):\n+ generator = torch.Generator()\n+ synchronize_rng_states([\"generator\"], generator=generator)\n+ assert are_the_same_tensors(generator.get_state())\n+\n if state.local_process_index == 0:\n print(\"All rng are properly synched.\")\n \n@@ -101,13 +109,14 @@ def dl_preparation_check():\n print(\"Shuffled dataloader passing.\")\n \n \n-def mock_training(length, batch_size):\n+def mock_training(length, batch_size, generator):\n 
set_seed(42)\n+ generator.manual_seed(42)\n train_set = RegressionDataset(length=length)\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n model = RegressionModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n- for _ in range(3):\n+ for epoch in range(3):\n for batch in train_dl:\n model.zero_grad()\n output = model(batch[\"x\"])\n@@ -119,21 +128,23 @@ def mock_training(length, batch_size):\n \n def training_check():\n state = AcceleratorState()\n+ generator = torch.Generator()\n batch_size = 8\n length = batch_size * 4 * state.num_processes\n \n- train_set, old_model = mock_training(length, batch_size * state.num_processes)\n+ train_set, old_model = mock_training(length, batch_size * state.num_processes, generator)\n assert are_the_same_tensors(old_model.a)\n assert are_the_same_tensors(old_model.b)\n \n accelerator = Accelerator()\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n model = RegressionModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n \n train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n set_seed(42)\n- for _ in range(3):\n+ generator.manual_seed(42)\n+ for epoch in range(3):\n for batch in train_dl:\n model.zero_grad()\n output = model(batch[\"x\"])\n@@ -145,15 +156,16 @@ def training_check():\n assert torch.allclose(old_model.a, model.a)\n assert torch.allclose(old_model.b, model.b)\n \n- accelerator.print(\"Training yielded the same results on one CPU or distributes setup with no batch split.\")\n+ accelerator.print(\"Training yielded the same results on one CPU or distributed setup with no batch split.\")\n \n accelerator = Accelerator(split_batches=True)\n- train_dl = DataLoader(train_set, batch_size=batch_size * state.num_processes, shuffle=True)\n+ train_dl = DataLoader(train_set, batch_size=batch_size * state.num_processes, shuffle=True, generator=generator)\n model = RegressionModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n \n train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n set_seed(42)\n+ generator.manual_seed(42)\n for _ in range(3):\n for batch in train_dl:\n model.zero_grad()\n@@ -170,12 +182,13 @@ def training_check():\n \n # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16\n accelerator = Accelerator(fp16=True)\n- train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n+ train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)\n model = RegressionModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n \n train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)\n set_seed(42)\n+ generator.manual_seed(42)\n for _ in range(3):\n for batch in train_dl:\n model.zero_grad()\ndiff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\nindex 9e99d4c7b..4a078b475 100644\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -13,6 +13,8 @@\n # limitations under the License.\n \n import random\n+from enum import Enum\n+from typing import List, Optional, Union\n \n import numpy as np\n import torch\n@@ -24,6 +26,13 @@\n import torch_xla.core.xla_model as xm\n \n \n+class RNGType(Enum):\n+ TORCH = \"torch\"\n+ CUDA = \"cuda\"\n+ XLA = \"xla\"\n+ GENERATOR = \"generator\"\n+\n+\n def 
set_seed(seed: int):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.\n@@ -36,27 +45,46 @@ def set_seed(seed: int):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # ^^ safe to call this function even if cuda is not available\n+ if is_tpu_available():\n+ xm.set_rng_state(seed)\n \n \n-def synchronize_rng_states():\n- \"\"\"\n- Helper function to synchronize the rng states in distributed training.\n- \"\"\"\n+def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):\n+ # Get the proper rng state\n+ if rng_type == RNGType.TORCH:\n+ rng_state = torch.get_rng_state()\n+ elif rng_type == RNGType.CUDA:\n+ rng_state = torch.cuda.get_rng_state()\n+ elif rng_type == RNGType.XLA:\n+ assert is_tpu_available(), \"Can't synchronize XLA seeds on an environment without TPUs.\"\n+ rng_state = torch.tensor(xm.get_rng_state())\n+ elif rng_type == RNGType.GENERATOR:\n+ assert generator is not None, \"Need a generator to synchronize its seed.\"\n+ rng_state = generator.get_state()\n+ \n+ # Broadcast the rng state from device 0 to other devices\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n- rng_state = torch.get_rng_state()\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n- torch.set_rng_state(rng_state)\n elif state.distributed_type == DistributedType.MULTI_GPU:\n- rng_state = torch.get_rng_state().to(state.device)\n- # Broadcast the state from process 0 to all the others.\n+ rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n- torch.set_rng_state(rng_state.cpu())\n+ rng_state = rng_state.cpu()\n \n- # Broadcast the state from process 0 to all the others.\n- rng_state = torch.cuda.get_rng_state().to(state.device)\n- torch.distributed.broadcast(rng_state, 0)\n- torch.cuda.set_rng_state(rng_state.cpu())\n+ # Set the broadcast rng state\n+ if rng_type == RNGType.TORCH:\n+ torch.set_rng_state(rng_state)\n+ elif rng_type == RNGType.CUDA:\n+ torch.cuda.set_rng_state(rng_state)\n+ elif rng_type == RNGType.XLA:\n+ xm.set_rng_state(rng_state.item())\n+ elif rng_type == RNGType.GENERATOR:\n+ generator.set_state(rng_state)\n+\n+\n+def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):\n+ for rng_type in rng_types:\n+ synchronize_rng_state(RNGType(rng_type), generator=generator)\n \n \n def send_to_device(tensor, device):\n", "code_comments": [ { "body": "I like this!", "diff_hunk": "@@ -42,6 +44,18 @@ class Accelerator:\n cpu (:obj:`bool`, `optional`):\n Whether or not to force the script to execute on CPU. 
Will ignore GPU available if set to :obj:`True` and\n force the execution on one process only.\n+ rng_types (list of :obj:`str` or :class:`~accelerate.utils.RNGType`):", "from_author": false }, { "body": "Maybe you can move this comment above if you want to keep it", "diff_hunk": "@@ -35,28 +44,47 @@ def set_seed(seed: int):\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n+ if is_tpu_available():\n+ xm.set_rng_state(seed)\n # ^^ safe to call this function even if cuda is not available", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/8", "pr_id": 588461912 }, { "diff": "diff --git a/README.md b/README.md\nindex 728976692..0b28b6a50 100644\n--- a/README.md\n+++ b/README.md\n@@ -46,9 +46,9 @@ limitations under the License.\n \n ## Easy to integrate\n \n-🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boiler code needed to use multi-GPUs/TPU/fp16.\n+🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16.\n \n-🤗 Accelerate abstracts exactly and only the boiler code related to multi-GPUs/TPU/fp16 and let the rest of your code unchanged.\n+🤗 Accelerate abstracts exactly and only the boilerplate code related to multi-GPUs/TPU/fp16 and leaves the rest of your code unchanged.\n \n Here is an example:\n \n@@ -149,11 +149,11 @@ for epoch in range(10):\n </tr>\n </table>\n \n-As you can see on this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp16).\n+As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp16).\n \n-The same code can then in particular run without modification on your local machine for debugging or your training environment.\n+In particular, the same code can then be run without modification on your local machine for debugging or your training environment.\n \n-🤗 Accelerate even handles the device placement for you (a bit more changes to your code but safer in general), so you can even simplify your training loop further:\n+🤗 Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can even simplify your training loop further:\n \n <table>\n <tr>\n@@ -254,7 +254,7 @@ for epoch in range(10):\n \n ## Launching script\n \n-🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment then launch the scripts. No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training!\n+🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. 
No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training!\n On your machine(s) just run:\n \n ```bash\n@@ -277,7 +277,7 @@ accelerate launch examples/nlp_example.py\n \n You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library, In fact the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.\n \n-## Why shouldn't use 🤗 Accelerate?\n+## Why shouldn't I use 🤗 Accelerate?\n \n You shouldn't use 🤗 Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that, 🤗 Accelerate is not one of them.\n \n", "code_comments": [], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/7", "pr_id": 588102643 }, { "diff": "diff --git a/README.md b/README.md\nindex 794176758..728976692 100644\n--- a/README.md\n+++ b/README.md\n@@ -140,7 +140,7 @@ for epoch in range(10):\n output, targets\n )\n \n-+ accelerate.backward(loss)\n++ accelerator.backward(loss)\n \n optimizer.step()\n ```\n@@ -243,7 +243,7 @@ for epoch in range(10):\n output, targets\n )\n \n-+ accelerate.backward(loss)\n++ accelerator.backward(loss)\n \n optimizer.step()\n ```\n", "code_comments": [], "context": [ { "body": "Thanks a lot!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/6", "pr_id": 586408354 }, { "diff": "diff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex e20426857..859309a7a 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -49,6 +49,9 @@\n \n \n def training_function(config, args):\n+ # Initialize accelerator\n+ accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu)\n+\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n@@ -97,14 +100,13 @@ def collate_fn(examples):\n )\n \n set_seed(seed)\n- # Initialize accelerator\n- accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu)\n \n # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n- # We could avoid this line since we set the accelerator with `device_placement=True`.\n- # If setting devices manually, this line absolutely needs to be before the optimizer creation otherwise training\n- # will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n+\n+ # We could avoid this line since the accelerator is set with `device_placement=True` (default value).\n+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\n+ # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\n model = model.to(accelerator.device)\n \n # Instantiate optimizer\n", "code_comments": [], "context": [ { "body": "Ah thanks a lot for fixing!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/4", "pr_id": 585909977 }, { "diff": "diff --git a/README.md b/README.md\nindex 3776c7cc2..a9d6c5704 100644\n--- a/README.md\n+++ b/README.md\n@@ -270,7 +270,7 @@ accelerate launch 
my_script.py --args_to_my_script\n For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo):\n \n ```bash\n-accelerate launch examples/glue_example.py --task_name mrpc --model_name_or_path bert-base-cased\n+accelerate launch examples/nlp_example.py\n ```\n \n ## Why should I use 🤗 Accelerate?\n", "code_comments": [ { "body": "```suggestion\r\naccelerate launch examples/nlp_example.py\r\n```\r\nThis one does not take any arg however.", "diff_hunk": "@@ -270,7 +270,7 @@ accelerate launch my_script.py --args_to_my_script\n For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo):\n \n ```bash\n-accelerate launch examples/glue_example.py --task_name mrpc --model_name_or_path bert-base-cased\n+accelerate launch examples/nlp_example.py --task_name mrpc --model_name_or_path bert-base-cased", "from_author": false } ], "context": [ { "body": "Thanks a lot!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/3", "pr_id": 585905166 }, { "diff": "diff --git a/README.md b/README.md\nindex 3776c7cc2..3fa28ba40 100644\n--- a/README.md\n+++ b/README.md\n@@ -254,7 +254,7 @@\n \n ## Launching script\n \n-🤗 Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment then launch the scripts. No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training!\n+🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment then launch the scripts. No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training!\n On your machine(s) just run:\n \n ```bash\ndiff --git a/examples/nlp_example.py b/examples/nlp_example.py\nindex 32ff600a9..e20426857 100644\n--- a/examples/nlp_example.py\n+++ b/examples/nlp_example.py\n@@ -39,7 +39,7 @@\n #\n # To run it in each of these various modes, follow the instructions\n # in the readme for examples:\n-# https://github.com/huggingface/accelerate/examples\n+# https://github.com/huggingface/accelerate/tree/main/examples\n #\n ########################################################################\n \n", "code_comments": [], "context": [ { "body": "Thanks!", "from_author": false } ], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/2", "pr_id": 585905045 }, { "diff": "diff --git a/.gitignore b/.gitignore\nindex b6e47617d..3b89c957d 100644\n--- a/.gitignore\n+++ b/.gitignore\n@@ -127,3 +127,6 @@ dmypy.json\n \n # Pyre type checker\n .pyre/\n+\n+# VSCode\n+.vscode\ndiff --git a/README.md b/README.md\nindex 781eea3c9..fcc20a12a 100644\n--- a/README.md\n+++ b/README.md\n@@ -34,7 +34,98 @@\n <p>Run your *raw* PyTorch training script on any kind of device\n </h3>\n \n-🤗 Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. 
The same code can then runs seamlessly on your local machine for debugging or your training environment.\n+🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boiler code needed to use multi-GPUs/TPU/fp16.\n+\n+🤗 Accelerate abstracts exactly and only the boiler code related to multi-GPUs/TPU/fp16 and let the rest of your code unchanged.\n+\n+Here is an example:\n+\n+<table>\n+<tr>\n+<th> Original training code (CPU or mono-GPU only)</th>\n+<th> With Accelerate for CPU/GPU/multi-GPUs/TPUs/fp16 </th>\n+</tr>\n+<tr>\n+<td>\n+\n+```python\n+import torch\n+import torch.nn.functional as F\n+from datasets import load_dataset\n+\n+\n+\n+device = 'cpu'\n+\n+model = torch.nn.Transformer().to(device)\n+optim = torch.optim.Adam(model.parameters())\n+\n+dataset = load_dataset('my_dataset')\n+data = torch.utils.data.Dataloader(dataset)\n+\n+\n+\n+\n+model.train()\n+for epoch in range(10):\n+ for source, targets in data:\n+ source = source.to(device)\n+ targets = targets.to(device)\n+\n+ optimizer.zero_grad()\n+\n+ output = model(source, targets)\n+ loss = F.cross_entropy(output, targets)\n+\n+ loss.backward()\n+\n+ optimizer.step()\n+```\n+\n+</td>\n+<td>\n+\n+```python\n+ import torch\n+ import torch.nn.functional as F\n+ from datasets import load_dataset\n+\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()\n++ device = accelerator.device\n+\n+ model = torch.nn.Transformer().to(device)\n+ optim = torch.optim.Adam(model.parameters())\n+\n+ dataset = load_dataset('my_dataset')\n+ data = torch.utils.data.Dataloader(dataset)\n+\n++ model, optim, data = accelerator.prepare(\n+ model, optim, data)\n+\n+ model.train()\n+ for epoch in range(10):\n+ for source, targets in data:\n+ source = source.to(device)\n+ targets = targets.to(device)\n+\n+ optimizer.zero_grad()\n+\n+ output = model(source, targets)\n+ loss = F.cross_entropy(output, targets)\n+\n++ accelerate.backward(loss)\n+\n+ optimizer.step()\n+```\n+\n+</td>\n+</tr>\n+</table>\n+\n+As you can see on this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp16).\n+\n+The same code can then in paticular run without modification on your local machine for debugging or your training environment.\n \n 🤗 Accelerate also provides a CLI tool that allows you to quickly configure and test your training environment then launch the scripts.\n \ndiff --git a/examples/README.md b/examples/README.md\nnew file mode 100644\nindex 000000000..cb1d2645b\n--- /dev/null\n+++ b/examples/README.md\n@@ -0,0 +1,75 @@\n+# In this folder we showcase various full examples using `Accelerate`\n+\n+## Simple NLP example\n+\n+The [simple_example.py](./simple_example.py) script is a simple example to train a Bert model on a classification task ([GLUE's MRPC]()).\n+\n+The same script can be run in any of the following configurations:\n+- single CPU or single GPU\n+- multi GPUS (using PyTorch distributed mode)\n+- (multi) TPUs\n+- fp16 (mixed-precision) or fp32 (normal precision)\n+\n+To run it in each of these various modes, use the following commands:\n+- single CPU:\n+ * from a server without GPU\n+ ```bash\n+ python ./simple_example.py\n+ ```\n+ * from any server by passing `cpu=True` to the `Accelerator`.\n+ ```bash\n+ python ./simple_example.py --cpu\n+ ```\n+ * from any server with Accelerate 
launcher\n+ ```bash\n+ accelerate launch --cpu ./simple_example.py\n+ ```\n+- single GPU:\n+ ```bash\n+ python ./simple_example.py # from a server with a GPU\n+ ```\n+- with fp16 (mixed-precision)\n+ * from any server by passing `fp16=True` to the `Accelerator`.\n+ ```bash\n+ python ./simple_example.py --fp16\n+ ```\n+ * from any server with Accelerate launcher\n+ ```bash\n+ accelerate launch --fb16 ./simple_example.py\n+- multi GPUS (using PyTorch distributed mode)\n+ * With Accelerate config and launcher\n+ ```bash\n+ accelerate config # This will create a config file on your server\n+ accelerate launch ./simple_example.py # This will run the script on your server\n+ ```\n+ * With traditional PyTorch launcher\n+ ```bash\n+ python -m torch.distributed.launch --nproc_per_node 2 --use_env ./simple_example.py\n+ ```\n+- multi GPUs, multi node (several machines, using PyTorch distributed mode)\n+ * With Accelerate config and launcher, on each machine:\n+ ```bash\n+ accelerate config # This will create a config file on each server\n+ accelerate launch ./simple_example.py # This will run the script on each server\n+ ```\n+ * With PyTorch launcher only\n+ ```bash\n+ python -m torch.distributed.launch --nproc_per_node 2 \\\n+ --use_env \\\n+ --node_rank 0 \\\n+ --master_addr master_node_ip_address \\\n+ ./simple_example.py # On the first server\n+ python -m torch.distributed.launch --nproc_per_node 2 \\\n+ --use_env \\\n+ --node_rank 1 \\\n+ --master_addr master_node_ip_address \\\n+ ./simple_example.py # On the second server\n+ ```\n+- (multi) TPUs\n+ * With Accelerate config and launcher\n+ ```bash\n+ accelerate config # This will create a config file on your TPU server\n+ accelerate launch ./simple_example.py # This will run the script on each server\n+ ```\n+ * In PyTorch:\n+ Add an `xmp.spawn` line in your script as you usually do.\ndiff --git a/examples/simple_example.py b/examples/simple_example.py\nindex dbbfd22f8..88c81e3a0 100644\n--- a/examples/simple_example.py\n+++ b/examples/simple_example.py\n@@ -11,11 +11,28 @@\n )\n \n \n+########################################################################\n+# This is a fully working simple example to use Accelerate\n+#\n+# This example train a Bert base model on GLUE MRPC\n+# in any of the following settings (with the same script):\n+# - single CPU or single GPU\n+# - multi GPUS (using PyTorch distributed mode)\n+# - (multi) TPUs\n+# - fp16 (mixed-precision) or fp32 (normal precision)\n+#\n+# To run it in each of these various modes, follow the instructions\n+# in the readme for examples:\n+# https://github.com/huggingface/accelerate/examples\n+#\n+########################################################################\n+\n+\n MAX_GPU_BATCH_SIZE = 16\n EVAL_BATCH_SIZE = 32\n \n \n-def training_function(config):\n+def training_function(config, args):\n # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n lr = config[\"lr\"]\n num_epochs = int(config[\"num_epochs\"])\n@@ -62,7 +79,7 @@ def collate_fn(examples):\n \n set_seed(seed)\n # Initialize accelerator\n- accelerator = Accelerator(device_placement=False)\n+ accelerator = Accelerator(device_placement=False, fp16=args.fp16, cpu=args.cpu)\n \n # Instantiate the model (we build the model here so that the seed also control new weights initialization)\n model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\", return_dict=True)\n@@ -117,8 +134,20 @@ def collate_fn(examples):\n \n \n def main():\n+ parser = 
argparse.ArgumentParser(description=\"Simple example of training script.\")\n+ parser.add_argument(\n+ \"--fp16\",\n+ action=\"store_true\",\n+ help=\"If passed, will use FP16 training.\",\n+ )\n+ parser.add_argument(\n+ \"--cpu\",\n+ action=\"store_true\",\n+ help=\"If passed, will train on the CPU.\",\n+ )\n+ args = parser.parse_args()\n config = {\"lr\": 2e-5, \"num_epochs\": 3, \"correct_bias\": True, \"seed\": 42, \"batch_size\": 16}\n- training_function(config)\n+ training_function(config, args)\n \n \n if __name__ == \"__main__\":\ndiff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py\nindex 96e3b2c86..a18f31372 100644\n--- a/src/accelerate/accelerator.py\n+++ b/src/accelerate/accelerator.py\n@@ -14,7 +14,7 @@ class Accelerator:\n \n Args:\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n- Whether or not the accelerator should put objects on device (tensors yielded by the datalaoder, model,\n+ Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,\n etc...).\n split_batches (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If\n@@ -38,6 +38,7 @@ def __init__(\n self, device_placement: bool = True, split_batches: bool = False, fp16: bool = None, cpu: bool = False\n ):\n self.state = AcceleratorState(fp16=fp16, cpu=cpu, _from_accelerator=True)\n+\n self.device_placement = device_placement\n self.split_batches = split_batches\n \n@@ -99,6 +100,13 @@ def prepare(self, *args):\n \"\"\"\n Prepare all objects passed in :obj:`args` for distributed training and mixed precision, then return them in the\n same order.\n+\n+ Accepts the following type of objects:\n+\n+ - :obj:`torch.utils.data.DataLoader`: PyTorch Dataloader\n+ - :obj:`torch.nn.Module`: PyTorch Module\n+ - :obj:`torch.optim.Optimizer`: PyTorch Optimizer\n+\n \"\"\"\n # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will\n # have parameters disconnected from the model (so no training :-( ).\ndiff --git a/src/accelerate/commands/launch.py b/src/accelerate/commands/launch.py\nindex d559d0b0b..601cba147 100644\n--- a/src/accelerate/commands/launch.py\n+++ b/src/accelerate/commands/launch.py\n@@ -32,6 +32,9 @@ def launch_command_parser(subparsers=None):\n parser.add_argument(\n \"--fp16\", default=False, action=\"store_true\", help=\"Whether or not to use mixed precision training.\"\n )\n+ parser.add_argument(\n+ \"--cpu\", default=False, action=\"store_true\", help=\"Whether or not to force the training on the CPU.\"\n+ )\n parser.add_argument(\n \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\n )\n@@ -77,6 +80,7 @@ def simple_launcher(args):\n cmd.extend(args.training_script_args)\n \n current_env = os.environ.copy()\n+ current_env[\"USE_CPU\"] = str(args.cpu)\n current_env[\"USE_FP16\"] = str(args.fp16)\n \n process = subprocess.Popen(cmd, env=current_env)\n@@ -143,7 +147,7 @@ def launch_command(args):\n raise ValueError(\"You can only pick one between `--multi_gpu` and `--tpu`.\")\n \n # Get the default from the config file.\n- if args.config_file is not None or os.path.isfile(default_config_file):\n+ if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = LaunchConfig.from_json_file(json_file=args.config_file)\n if not args.multi_gpu and not args.tpu:\n args.multi_gpu = 
defaults.distributed_type == DistributedType.MULTI_GPU\n@@ -157,9 +161,9 @@ def launch_command(args):\n args.num_processes = 1\n \n # Use the proper launcher\n- if args.multi_gpu:\n+ if args.multi_gpu and not args.cpu:\n multi_gpu_launcher(args)\n- elif args.tpu:\n+ elif args.tpu and not args.cpu:\n tpu_launcher(args)\n else:\n simple_launcher(args)\n", "code_comments": [ { "body": "```suggestion\r\n+ accelerator = Accelerator(device_placement=False)\r\n```\r\nThe default is still `True` as it's safer for the user (and it still takes one line of code to have the default to `False`). If the table has space enough, I'd add the same example with the device placement handled by `accelerate` (like the current example below).", "diff_hunk": "@@ -34,7 +34,98 @@\n <p>Run your *raw* PyTorch training script on any kind of device\n </h3>\n \n-🤗 Accelerate provides an easy API to make your scripts run with mixed precision and on any kind of distributed setting (multi-GPUs, TPUs etc.) while still letting you write your own training loop. The same code can then run on your local machine for debugging or your training environment.\n+🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boiler code needed to use multi-GPUs/TPU/fp16.\n+\n+🤗 Accelerate abstracts exactly and only the boiler code related to multi-GPUs/TPU/fp16 and let the rest of your code unchanged.\n+\n+Here is an example:\n+\n+<table>\n+<tr>\n+<th> Original training code (CPU or mono-GPU only)</th>\n+<th> With Accelerate for CPU/GPU/multi-GPUs/TPUs/fp16 </th>\n+</tr>\n+<tr>\n+<td>\n+\n+```python\n+import torch\n+import torch.nn.functional as F\n+from datasets import load_dataset\n+\n+\n+\n+device = 'cpu'\n+\n+model = torch.nn.Transformer().to(device)\n+optim = torch.optim.Adam(model.parameters())\n+\n+dataset = load_dataset('my_dataset')\n+data = torch.utils.data.Dataloader(dataset)\n+\n+\n+\n+\n+model.train()\n+for epoch in range(10):\n+ for source, targets in data:\n+ source = source.to(device)\n+ targets = targets.to(device)\n+\n+ optimizer.zero_grad()\n+\n+ output = model(source, targets)\n+ loss = F.cross_entropy(output, targets)\n+\n+ loss.backward()\n+\n+ optimizer.step()\n+```\n+\n+</td>\n+<td>\n+\n+```python\n+ import torch\n+ import torch.nn.functional as F\n+ from datasets import load_dataset\n+\n++ from accelerate import Accelerator\n++ accelerator = Accelerator()", "from_author": false } ], "context": [], "url": "https://api.github.com/repos/huggingface/accelerate/pulls/1", "pr_id": 577481934 } ]
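
The records above document, among other things, the `rng_types`/`generator` synchronization added to `Accelerator` and `prepare_data_loader`, and the `accelerator.prepare(...)`/`accelerator.backward(...)` training-loop pattern from the README diffs. As a rough illustration only (it is not part of any PR record above), the sketch below shows how a user script might opt into the generator-based RNG synchronization described in those diffs; the toy dataset and model are invented for the example, and it assumes PyTorch >= 1.6 so that `DataLoader` accepts a `generator` argument.

```python
# Hypothetical usage sketch (not taken from the repository): rely on the
# "generator" RNG synchronization documented in the Accelerator and
# prepare_data_loader diffs above, so shuffling stays identical across
# processes without touching the global torch RNG.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

# Explicitly request generator-based synchronization (the documented default
# for PyTorch >= 1.6).
accelerator = Accelerator(rng_types=["generator"])

# A local torch.Generator drives the shuffling; prepare() is documented to
# seed it and keep it synchronized so every process sees the same order.
generator = torch.Generator()
dataset = TensorDataset(torch.randn(64, 2), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=8, shuffle=True, generator=generator)

model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for epoch in range(3):
    for inputs, targets in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
```

On a single process this behaves like a plain PyTorch loop; under a distributed launch (for example via `accelerate launch`), the prepared dataloader synchronizes the local generator at the start of each iteration, which is the behavior the quicktour diff describes.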