Dataset Viewer (auto-converted to Parquet)

Columns:
- `text`: string, length 5 to 409k
- `id`: string, length 14 to 178
- `metadata`: dict
- `__index_level_0__`: int64, range 0 to 594
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`. This particular script verifies this for DeepSpeed training. NOTE: MS-AMP does *not* support ZeRO-3. """ # import msamp.deepspeed as msamp_deepspeed import evaluate import torch from fp8_utils import evaluate_model, get_training_utilities from msamp import deepspeed as msamp_deepspeed from accelerate import Accelerator, DeepSpeedPlugin from accelerate.state import AcceleratorState from accelerate.utils import set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(zero_stage: int = 1, opt_level: str = "O1"): set_seed(42) accelerator = Accelerator() model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) import numpy as np config = { "train_batch_size": 32, "train_micro_batch_size_per_gpu": 16, "gradient_accumulation_steps": 1, "zero_optimization": { "stage": zero_stage, "offload_optimizer": {"device": "none", "nvme_path": None}, "offload_param": {"device": "none", "nvme_path": None}, }, "gradient_clipping": 1.0, "steps_per_print": np.inf, "bf16": {"enabled": True}, "fp16": {"enabled": False}, "zero_allow_untested_optimizer": True, "msamp": { "enabled": True, "opt_level": opt_level, }, } ( model, optimizer, _, _, ) = msamp_deepspeed.initialize( model=model, optimizer=optimizer, config_params=config, ) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss model.backward(loss) model.step() for _ in range(accelerator.num_processes): lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() torch.cuda.empty_cache() AcceleratorState()._reset_state(True) assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results def train_integration(zero_stage: int = 1, opt_level: str = "O1"): set_seed(42) deepspeed_plugin = DeepSpeedPlugin( zero_stage=zero_stage, enable_msamp=True, msamp_opt_level=opt_level, ) accelerator = Accelerator(mixed_precision="fp8", deepspeed_plugin=deepspeed_plugin) accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16 model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) base_model_results = 
evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() torch.cuda.empty_cache() assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' AcceleratorState()._reset_state(True) return base_model_results, trained_model_results if __name__ == "__main__": for zero_stage in [1, 2]: for opt_level in ["O1", "O2", "O3"]: baseline_not_trained, baseline_trained = train_baseline(zero_stage, opt_level) accelerator_not_trained, accelerator_trained = train_integration(zero_stage, opt_level) assert ( baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"] ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}' assert ( baseline_not_trained["f1"] == accelerator_not_trained["f1"] ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}' assert ( baseline_trained["accuracy"] == accelerator_trained["accuracy"] ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}' assert ( baseline_trained["f1"] == accelerator_trained["f1"] ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}' torch.distributed.destroy_process_group()
accelerate/benchmarks/fp8/ms_amp/distrib_deepspeed.py/0
{ "file_path": "accelerate/benchmarks/fp8/ms_amp/distrib_deepspeed.py", "repo_id": "accelerate", "token_count": 2583 }
0
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Loading big models into memory When loading a pre-trained model in PyTorch, the usual workflow looks like this: ```py import torch my_model = ModelClass(...) state_dict = torch.load(checkpoint_file) my_model.load_state_dict(state_dict) ``` In plain English, those steps are: 1. Create the model with randomly initialized weights 2. Load the model weights (in a dictionary usually called a state dict) from the disk 3. Load those weights inside the model While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pre-trained weights. If you're loading a model with 6 billion parameters, this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of it to load the model in FP16). <Tip warning={true}> This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future. </Tip> ## How the Process Works: A Quick Overview <Youtube id="MWCSGj9jEAo" /> ## How the Process Works: Working with Code ### Instantiating an empty model The first tool Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works: ```py from accelerate import init_empty_weights with init_empty_weights(): my_model = ModelClass(...) ``` For instance: ```py with init_empty_weights(): model = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) ``` initializes an empty model with a bit more than 100B parameters. Behind the scenes, this relies on the meta device introduced in PyTorch 1.9. During the initialization under the context manager, each time a parameter is created, it is instantly moved to that device. <Tip warning={true}> You can't move a model initialized like this on CPU or another device directly, since it doesn't have any data. It's also very likely that a forward pass with that empty model will fail, as not all operations are supported on the meta device. </Tip> ### Sharded checkpoints It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, this is more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards. 
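As a quick illustration of the meta-device behavior described above (a minimal sketch, not taken from the original guide), you can check the device of a parameter created under the context manager:

```py
from torch import nn

from accelerate import init_empty_weights

with init_empty_weights():
    tiny_model = nn.Linear(4, 4)

# Parameters created under the context manager live on the meta device and hold no data,
# which is why real weights must be loaded before the model can run.
print(tiny_model.weight.device)  # meta
```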
Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. For instance, we could have a folder containing: ```bash first_state_dict.bin index.json second_state_dict.bin ``` with index.json being the following file: ``` { "linear1.weight": "first_state_dict.bin", "linear1.bias": "first_state_dict.bin", "linear2.weight": "second_state_dict.bin", "linear2.bias": "second_state_dict.bin" } ``` and `first_state_dict.bin` containing the weights for `"linear1.weight"` and `"linear1.bias"`, `second_state_dict.bin` the ones for `"linear2.weight"` and `"linear2.bias"` ### Loading weights The second tool Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard. If you want to use big model inference with Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading). Here is how we can use this to load the [GPT2-1.5B](https://huggingface.co/marcsun13/gpt2-xl-linear-sharded) model. Let's download the sharded version of this model. ```bash pip install huggingface_hub ``` ```py from huggingface_hub import snapshot_download checkpoint = "marcsun13/gpt2-xl-linear-sharded" weights_location = snapshot_download(repo_id=checkpoint) ``` In order to initialize the model, we will use the library minGPT. ```bash git clone https://github.com/karpathy/minGPT.git pip install minGPT/ ``` ```py from accelerate import init_empty_weights from mingpt.model import GPT model_config = GPT.get_default_config() model_config.model_type = 'gpt2-xl' model_config.vocab_size = 50257 model_config.block_size = 1024 with init_empty_weights(): model = GPT(model_config) ``` Then, load the checkpoint we just downloaded with: ```py from accelerate import load_checkpoint_and_dispatch model = load_checkpoint_and_dispatch( model, checkpoint=weights_location, device_map="auto", no_split_module_classes=['Block'] ) ``` By passing `device_map="auto"`, we tell Accelerate to determine automatically where to put each layer of the model depending on the available resources: - first, we use the maximum space available on the GPU(s) - if we still need space, we store the remaining weights on the CPU - if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors #### `no_split_module_classes` This parameter will indicate that some of the modules with the name `"Block"` should not be split across different devices. You should set here all blocks that include a residual connection of some kind. #### The `device_map` You can see the `device_map` that Accelerate picked by accessing the `hf_device_map` attribute of your model: ```py model.hf_device_map ``` ```python out {'transformer.wte': 0, 'transformer.wpe': 0, 'transformer.drop': 0, 'transformer.h.0': 0, ... 
'transformer.h.21': 0, 'transformer.h.22': 1, 'transformer.h.23': 1, 'transformer.h.24': 1, ... 'transformer.h.47': 1, 'transformer.ln_f': 1, 'lm_head': 1} ``` It's fully possible to create your own device map for the layers to use as well, specifying the GPU device to use (a number), `"cpu"`, or `"disk"` and pass this in: ```python device_map = { "transformer.wte": "cpu", "transformer.wpe": 0, "transformer.drop": "cpu", "transformer.h.0": "disk" } model = load_checkpoint_and_dispatch( model, checkpoint=weights_location, device_map=device_map ) ``` ### Run the model Now that we have done this, our model lies across several devices, and maybe the hard drive. But it can still be used as a regular PyTorch model: ```py from mingpt.bpe import BPETokenizer tokenizer = BPETokenizer() inputs = tokenizer("Hello, my name is").to(0) outputs = model.generate(x1, max_new_tokens=10, do_sample=False)[0] tokenizer.decode(outputs.cpu().squeeze()) ``` Behind the scenes, Accelerate added hooks to the model, so that: - at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works) - for the weights offloaded on the CPU, they are put on a GPU just before the forward pass and cleaned up just after - for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass and cleaned up just after This way, your model can run for inference even if it doesn't fit on one of the GPUs or the CPU RAM! <Tip warning={true}> This only supports the inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending some GPU memory with intermediate activations. </Tip> ### Designing a device map You can let Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go. <Tip> You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device. </Tip> All the options will produce the same result when you don't have enough GPU memory to accommodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM). When you have more GPU memory available than the model size, here is the difference between each option: - `"auto"` and `"balanced"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1. - `"balanced_low_0"` evenly splits the model on all GPUs except the first one, and only puts on GPU 0 what does not fit on the others. This option is great when you need to use GPU 0 for some processing of the outputs, like when using the `generate` function for Transformers models - `"sequential"` will fit what it can on GPU 0, then move on GPU 1 and so forth (so won't use the last GPUs if it doesn't need to). <Tip> The options `"auto"` and `"balanced"` produce the same results for now, but the behavior of `"auto"` might change in the future if we find a strategy that makes more sense, while `"balanced"` will stay stable. </Tip> First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) 
and the `"cpu"` key for the maximum RAM you want to use for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `"10GiB"` or `"10GB"`. Here is an example where we don't want to use more than 10GiB on each of the two GPUs and no more than 30GiB of CPU RAM for the model weights: ```python from accelerate import infer_auto_device_map device_map = infer_auto_device_map(my_model, max_memory={0: "10GiB", 1: "10GiB", "cpu": "30GiB"}) ``` <Tip warning={true}> When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used do `torch.ones(1).cuda()` and look at the memory usage. Therefore when you create memory maps with `max_memory` make sure to adjust the available memory accordingly to avoid out-of-memory errors. </Tip> Additionally, if you do some additional operations with your outputs without placing them back on the CPU (for instance inside the `generate` method of Transformers) and if you placed your inputs on a GPU, that GPU will consume more memory than the others (Accelerate always place the output back to the device of the input). Therefore if you would like to optimize the maximum batch size and you have many GPUs, give the first GPU less memory. For example, with BLOOM-176B on 8x80 A100 setup, the close-to-ideal map is: ```python max_memory = {0: "30GIB", 1: "46GIB", 2: "46GIB", 3: "46GIB", 4: "46GIB", 5: "46GIB", 6: "46GIB", 7: "46GIB"} ``` as you can see we gave the remaining 7 GPUs ~50% more memory than GPU 0. If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs) or `"cpu"` for CPU offload, `"disk"` for disk offload. The keys need to cover the whole model, you can then define your device map as you wish: for instance, if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be: ```python device_map = {"block1": 0, "block2": 1} ``` another one that is valid could be: ```python device_map = {"block1": 0, "block2.linear1": 0, "block2.linear2": 1, "block2.linear3": 1} ``` On the other hand, this one is not valid as it does not cover every parameter of the model: ```python device_map = {"block1": 0, "block2.linear1": 1, "block2.linear2": 1} ``` <Tip> To be the most efficient, make sure your device map puts the parameters on the GPUs in a sequential manner (e.g. don't put one of the first weights on GPU 0, then weights on GPU 1 and the last weight back to GPU 0) to avoid making many transfers of data between the GPUs. </Tip> ## CPU offload only If you want to offload your model on CPU, you can use [`cpu_offload`]. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device and passed as they are needed, then offloaded again. ```python cpu_offload(model, execution_device) ``` You can also use [`cpu_offload_with_hook`]. This function will offloads a model on the CPU and puts it back to an execution device when executed. 
The difference with [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when the `offload` method of the returned `hook` is called. Furthermore, [`cpu_offload_with_hook`] is more performant but less memory saving. It is useful for pipelines running a model in a loop: ```python model_1, hook_1 = cpu_offload_with_hook(model_1, execution_device) model_2, hook_2 = cpu_offload_with_hook(model_2, execution_device, prev_module_hook=hook_1) model_3, hook_3 = cpu_offload_with_hook(model_3, execution_device, prev_module_hook=hook_2) hid_1 = model_1(input) for i in range(50): # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop. hid_2 = model_2(hid_1) # model2 is offloaded to the CPU just before this forward. hid_3 = model_3(hid_3) # For model3, you need to manually call the hook offload method. hook_3.offload() ``` ## Disk offload only To perform disk offload, you can use [`disk_offload`]. As a result, all parameters of the model will be offloaded as memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and put on the execution device passed as they are needed, then offloaded again. ```python disk_offload(model, offload_dir, execution_device) ``` ## Limits and further development We are aware of the current limitations in the API: - [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) tries to maximize GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), it's not entirely true with Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. Move a few modules to the disk device if you get crashes due to a lack of RAM. - [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) attributes devices sequentially (to avoid moving things back and forth) so if your first layer is bigger than the size of the GPU you have, it will end up with everything on the CPU/Disk. - [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys. - The model parallelism used when your model is split on several GPUs is naive and not optimized, meaning that only one GPU works at a given time and the other sits idle. - When weights are offloaded on the CPU/hard drive, there is no pre-fetching (yet, we will work on this for future versions) which means the weights are put on the GPU when they are needed and not before. - Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).
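Putting the pieces of this guide together, here is a hedged end-to-end sketch (the model class and checkpoint location are placeholders) that first computes a capped device map and then dispatches the checkpoint, spilling whatever does not fit to an offload folder on disk:

```py
from accelerate import infer_auto_device_map, init_empty_weights, load_checkpoint_and_dispatch

with init_empty_weights():
    model = ModelClass(...)  # placeholder: any PyTorch model definition

# Cap GPU 0 and CPU RAM so the overflow is assigned to "disk" up front.
device_map = infer_auto_device_map(model, max_memory={0: "10GiB", "cpu": "20GiB"})

model = load_checkpoint_and_dispatch(
    model,
    checkpoint=weights_location,  # placeholder: path to the (sharded) checkpoint
    device_map=device_map,
    offload_folder="offload",  # folder where disk-offloaded weights are memory-mapped
)
```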
accelerate/docs/source/concept_guides/big_model_inference.md/0
{ "file_path": "accelerate/docs/source/concept_guides/big_model_inference.md", "repo_id": "accelerate", "token_count": 4809 }
1
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Using multiple models with DeepSpeed <Tip warning={true}> This guide assumes that you have read and understood the [DeepSpeed usage guide](./deepspeed.md). </Tip> Running multiple models with Accelerate and DeepSpeed is useful for: * Knowledge distillation * Post-training techniques like RLHF (see the [TRL](https://github.com/huggingface/trl) library for more examples) * Training multiple models at once Currently, Accelerate has a **very experimental API** to help you use multiple models. This tutorial will focus on two common use cases: 1. Knowledge distillation, where a smaller student model is trained to mimic a larger, better-performing teacher. If the student model fits on a single GPU, we can use ZeRO-2 for training and ZeRO-3 to shard the teacher for inference. This is significantly faster than using ZeRO-3 for both models. 2. Training multiple *disjoint* models at once. ## Knowledge distillation Knowledge distillation is a good example of using multiple models, but only training one of them. Normally, you would use a single [`utils.DeepSpeedPlugin`] for both models. However, in this case, there are two separate configurations. Accelerate allows you to create and use multiple plugins **if and only if** they are in a `dict` so that you can reference and enable the proper plugin when needed. ```python from accelerate.utils import DeepSpeedPlugin zero2_plugin = DeepSpeedPlugin(hf_ds_config="zero2_config.json") zero3_plugin = DeepSpeedPlugin(hf_ds_config="zero3_config.json") deepspeed_plugins = {"student": zero2_plugin, "teacher": zero3_plugin} ``` The `zero2_config.json` should be configured for full training (so specify `scheduler` and `optimizer` if you are not utilizing your own), while `zero3_config.json` should only be configured for the inference model, as shown in the example below. ```json { "bf16": { "enabled": "auto" }, "zero_optimization": { "stage": 3, "overlap_comm": true, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": "auto", "stage3_max_reuse_distance": "auto", }, "train_micro_batch_size_per_gpu": 1 } ``` An example `zero2_config.json` configuration is shown below. 
```json { "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", } ``` <Tip> DeepSpeed will raise an error if `train_micro_batch_size_per_gpu` isn't specified, even if this particular model isn't being trained. </Tip> From here, create a single [`Accelerator`] and pass in both configurations. ```python from accelerate import Accelerator accelerator = Accelerator(deepspeed_plugins=deepspeed_plugins) ``` Now let's see how to use them. ### Student model By default, Accelerate sets the first item in the `dict` as the default or enabled plugin (`"student"` plugin). Verify this by using the [`utils.deepspeed.get_active_deepspeed_plugin`] function to see which plugin is enabled. ```python active_plugin = get_active_deepspeed_plugin(accelerator.state) assert active_plugin is deepspeed_plugins["student"] ``` [`AcceleratorState`] also keeps the active DeepSpeed plugin saved in `state.deepspeed_plugin`. ```python assert active_plugin is accelerator.deepspeed_plugin ``` Since `student` is the currently active plugin, let's go ahead and prepare the model, optimizer, and scheduler. ```python student_model, optimizer, scheduler = ... student_model, optimizer, scheduler, train_dataloader = accelerator.prepare(student_model, optimizer, scheduler, train_dataloader) ``` Now it's time to deal with the teacher model. ### Teacher model First, you need to specify in [`Accelerator`] that the `zero3_config.json` configuration should be used. ```python accelerator.state.select_deepspeed_plugin("teacher") ``` This disables the `"student"` plugin and enables the `"teacher"` plugin instead. The DeepSpeed stateful config inside of Transformers is updated, and it changes which plugin configuration gets called when using `deepspeed.initialize()`. This allows you to use the automatic `deepspeed.zero.Init` context manager integration Transformers provides. ```python teacher_model = AutoModel.from_pretrained(...) teacher_model = accelerator.prepare(teacher_model) ``` Otherwise, you should manually initialize the model with `deepspeed.zero.Init`. ```python with deepspeed.zero.Init(accelerator.deepspeed_plugin.config): model = MyModel(...) ``` ### Training From here, your training loop can be whatever you like, as long as `teacher_model` is never being trained on. ```python teacher_model.eval() student_model.train() for batch in train_dataloader: with torch.no_grad(): output_teacher = teacher_model(**batch) output_student = student_model(**batch) # Combine the losses or modify it in some way loss = output_teacher.loss + output_student.loss accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() ``` ## Train multiple disjoint models Training multiple models is a more complicated scenario. In its current state, we assume each model is **completely disjointed** from the other during training. This scenario still requires two [`utils.DeepSpeedPlugin`]'s to be made. However, you also need a second [`Accelerator`], since different `deepspeed` engines are being called at different times. A single [`Accelerator`] can only carry one instance at a time. 
Since the [`state.AcceleratorState`] is a stateful object though, it is already aware of both [`utils.DeepSpeedPlugin`]'s available. You can just instantiate a second [`Accelerator`] with no extra arguments. ```python first_accelerator = Accelerator(deepspeed_plugins=deepspeed_plugins) second_accelerator = Accelerator() ``` You can call either `first_accelerator.state.select_deepspeed_plugin()` to enable or disable a particular plugin, and then call [`prepare`]. ```python # can be `accelerator_0`, `accelerator_1`, or by calling `AcceleratorState().select_deepspeed_plugin(...)` first_accelerator.state.select_deepspeed_plugin("first_model") first_model = AutoModel.from_pretrained(...) # For this example, `get_training_items` is a nonexistent function that gets the setup we need for training first_optimizer, first_scheduler, train_dl, eval_dl = get_training_items(model1) first_model, first_optimizer, first_scheduler, train_dl, eval_dl = accelerator.prepare( first_model, first_optimizer, first_scheduler, train_dl, eval_dl ) second_accelerator.state.select_deepspeed_plugin("second_model") second_model = AutoModel.from_pretrained(...) # For this example, `get_training_items` is a nonexistent function that gets the setup we need for training second_optimizer, second_scheduler, _, _ = get_training_items(model2) second_model, second_optimizer, second_scheduler = accelerator.prepare( second_model, second_optimizer, second_scheduler ) ``` And now you can train: ```python for batch in dl: outputs1 = first_model(**batch) first_accelerator.backward(outputs1.loss) first_optimizer.step() first_scheduler.step() first_optimizer.zero_grad() outputs2 = model2(**batch) second_accelerator.backward(outputs2.loss) second_optimizer.step() second_scheduler.step() second_optimizer.zero_grad() ``` ## Resources To see more examples, please check out the [related tests](https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/scripts/external_deps/test_ds_multiple_model.py) currently in [Accelerate].
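The distillation training loop shown earlier simply sums the teacher and student losses; in practice, knowledge distillation usually compares softened teacher and student logits with a KL-divergence term. The snippet below is a generic sketch of that idea (the temperature and scaling are illustrative assumptions, not part of the Accelerate API):

```python
import torch.nn.functional as F


def distillation_loss(student_logits, teacher_logits, temperature: float = 2.0):
    # Soften both distributions, then measure how far the student is from the teacher.
    # Scaling by temperature**2 keeps gradient magnitudes comparable across temperatures.
    student_log_probs = F.log_softmax(student_logits / temperature, dim=-1)
    teacher_probs = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(student_log_probs, teacher_probs, reduction="batchmean") * temperature**2
```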
accelerate/docs/source/usage_guides/deepspeed_multiple_model.md/0
{ "file_path": "accelerate/docs/source/usage_guides/deepspeed_multiple_model.md", "repo_id": "accelerate", "token_count": 2924 }
2
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # In this folder we showcase various full examples using 🤗 Accelerate ## Simple NLP example The [nlp_example.py](./nlp_example.py) script is a simple example to train a Bert model on a classification task ([GLUE's MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398)). Prior to running it you should install 🤗 Dataset and 🤗 Transformers: ```bash pip install datasets evaluate transformers ``` The same script can be run in any of the following configurations: - single CPU or single GPU - multi CPUs - multi GPUs (using PyTorch distributed mode) - (multi) TPUs - fp16 (mixed-precision) or fp32 (normal precision) To run it in each of these various modes, use the following commands: - single CPU: * from a server without GPU ```bash python ./nlp_example.py ``` * from any server by passing `cpu=True` to the `Accelerator`. ```bash python ./nlp_example.py --cpu ``` * from any server with Accelerate launcher ```bash accelerate launch --cpu ./nlp_example.py ``` - single GPU: ```bash python ./nlp_example.py # from a server with a GPU ``` - with fp16 (mixed-precision) * from any server by passing `mixed_precison=fp16` to the `Accelerator`. ```bash python ./nlp_example.py --mixed_precision fp16 ``` * from any server with Accelerate launcher ```bash accelerate launch --mixed_precision fp16 ./nlp_example.py - multi CPUs (requires Open MPI, Intel MPI, or MVAPICH) * With Accelerate config and launcher, execute the following from node 0: ```bash accelerate config # Select to have accelerate launch mpirun accelerate launch ./nlp_example.py # This will run the script on each server ``` * With Intel MPI: ```bash export CCL_WORKER_COUNT=1 export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip mpirun -f hostfile -n 16 -ppn 4 python ./nlp_example.py ``` - multi GPUs (using PyTorch distributed mode) * With Accelerate config and launcher ```bash accelerate config # This will create a config file on your server accelerate launch ./nlp_example.py # This will run the script on your server ``` * With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`) ```bash torchrun --nproc_per_node 2 ./nlp_example.py ``` - multi GPUs, multi node (several machines, using PyTorch distributed mode) * With Accelerate config and launcher, on each machine: ```bash accelerate config # This will create a config file on each server accelerate launch ./nlp_example.py # This will run the script on each server ``` * With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). 
Run this command on each node: ```bash torchrun \ # python -m torch.distributed.run --nproc_per_node 2 \ --nnodes 2 \ --rdzv_id 2299 \ # A unique job id --rdzv_backend c10d \ --rdzv_endpoint master_node_ip_address:29500 \ ./nlp_example.py ``` - (multi) TPUs * With Accelerate config and launcher ```bash accelerate config # This will create a config file on your TPU server accelerate launch ./nlp_example.py # This will run the script on each server ``` * In PyTorch: Add an `xmp.spawn` line in your script as you usually do. ## Simple vision example The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a ResNet-50 on a classification task ([Ofxord-IIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/)). The same script can be run in any of the following configurations: - single CPU or single GPU - multi CPUs - multi GPUs (using PyTorch distributed mode) - (multi) TPUs - fp16 (mixed-precision) or fp32 (normal precision) Prior to running it you should install timm and torchvision: ```bash pip install timm torchvision ``` and you should download the data with the following commands: ```bash wget https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz tar -xzf images.tar.gz ``` To run it in each of these various modes, use the following commands: - single CPU: * from a server without GPU ```bash python ./cv_example.py --data_dir path_to_data ``` * from any server by passing `cpu=True` to the `Accelerator`. ```bash python ./cv_example.py --data_dir path_to_data --cpu ``` * from any server with Accelerate launcher ```bash accelerate launch --cpu ./cv_example.py --data_dir path_to_data ``` - single GPU: ```bash python ./cv_example.py # from a server with a GPU ``` - with fp16 (mixed-precision) * from any server by passing `mixed_precison=fp16` to the `Accelerator`. 
```bash python ./cv_example.py --data_dir path_to_data --mixed_precison fp16 ``` * from any server with Accelerate launcher ```bash accelerate launch --mixed_precison fp16 ./cv_example.py --data_dir path_to_data - multi CPUs (requires Open MPI, Intel MPI, or MVAPICH) * With Accelerate config and launcher, run the following from node 0: ```bash accelerate config --config_file config.yaml # Select to have accelerate launch mpirun accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server ``` * With Intel MPI, execute mpirun from node 0: ```bash export CCL_WORKER_COUNT=1 export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip mpirun -f hostfile -n 16 -ppn 4 python ./cv_example.py --data_dir path_to_data ``` - multi GPUs (using PyTorch distributed mode) * With Accelerate config and launcher ```bash accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml` accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on your server ``` * With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`) ```bash torchrun --nproc_per_node 2 ./cv_example.py --data_dir path_to_data ``` - multi GPUs, multi node (several machines, using PyTorch distributed mode) * With Accelerate config and launcher, on each machine: ```bash accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml` accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server ``` * With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node: ```bash torchrun \ # python -m torch.distributed.run --nproc_per_node 2 \ --nnodes 2 \ --rdzv_id 2299 \ # A unique job id --rdzv_backend c10d \ --rdzv_endpoint master_node_ip_address:29500 \ ./cv_example.py --data_dir path_to_data ``` - (multi) TPUs * With Accelerate config and launcher ```bash accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml` accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server ``` * In PyTorch: Add an `xmp.spawn` line in your script as you usually do. ### Simple vision example (GANs) - [huggan project](https://github.com/huggingface/community-events/tree/main/huggan) ### Using AWS SageMaker integration - [Examples showcasing AWS SageMaker integration of 🤗 Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker) ## Configuration zoo In [/config_yaml_templates](./config_yaml_templates/) we have a variety of *minimal* `config.yaml` templates and examples to help you learn how to create your own configuration files depending on the scenario. ## SLURM Scripts In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) and [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we present two scripts for running the examples on a machine with [SLURM](https://slurm.schedmd.com/documentation.html) workload manager. In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in the launcher that needs to be modified is `--num_processes`, which determines the number of GPUs we will use. In this case, using the environment variable `$SLURM_GPUS`, we indicate that we want to utilize all the GPUs available on the node we have requested. 
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address the master node and the `--main_process_port`. In [/slurm/submit_multicpu.sh](./slurm/submit_multicpu.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many CPU processes we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address the master node and the `--main_process_port`. `mpirun_hostfile` specifies to run the job using MPIRun. In both scripts, we run `activateEnviroment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have internet connection from the cluster. ```bash # activateEnvironment.sh module purge module load anaconda3/2020.02 cuda/10.2 cudnn/8.0.5 nccl/2.9.9 arrow/7.0.0 openmpi source activate /home/nct01/nct01328/pytorch_antoni_local export HF_HOME=/gpfs/projects/nct01/nct01328/ export HF_LOCAL_HOME=/gpfs/projects/nct01/nct01328/HF_LOCAL export HF_DATASETS_OFFLINE=1 export TRANSFORMERS_OFFLINE=1 export PYTHONPATH=/home/nct01/nct01328/transformers-in-supercomputers:$PYTHONPATH export GPUS_PER_NODE=4 ``` ## Simple Multi-GPU Hardware Launcher (using an external platform) [multigpu_remote_launcher.py](./multigpu_remote_launcher.py) is a minimal script that demonstrates launching accelerate on multiple remote GPUs, and with automatic hardware environment and dependency setup for reproducibility. You can easily customize the training function used, training arguments, hyperparameters, and type of compute hardware, and then run the script to automatically launch multi GPU training on remote hardware. This script uses [Runhouse](https://github.com/run-house/runhouse) to launch on self-hosted hardware (e.g. in your own cloud account or on-premise cluster) but there are other options for running remotely as well. Runhouse can be installed with `pip install runhouse`, and you can refer to [hardware setup](https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup) for hardware setup instructions, or this [Colab tutorial](https://colab.research.google.com/drive/1qVwYyLTCPYPSdz9ZX7BZl9Qm0A3j7RJe) for a more in-depth walkthrough. ## Finer Examples While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations. ### `by_feature` examples These scripts are *individual* examples highlighting one particular feature or use-case within Accelerate. They all stem from the [nlp_example.py](./nlp_example.py) script, and any changes or modifications is denoted with a `# New Code #` comment. Read the README.md file located in the `by_feature` folder for more information. ### `complete_*` examples These two scripts contain *every* single feature currently available in Accelerate in one place, as one giant script. 
New arguments that can be passed include:

- `checkpointing_steps`, whether the various states should be saved at the end of every `n` steps, or `"epoch"` for each epoch. States are then saved to folders named `step_{n}` or `epoch_{n}` (see the sketch after this list).
- `resume_from_checkpoint`, should be used if you want to resume training off of a previous call to the script that passed a `checkpointing_steps` value to it.
- `with_tracking`, should be used if you want to log the training run using all available experiment trackers in your environment. Currently supported trackers include TensorBoard, Weights and Biases, and CometML.
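As a rough sketch of how `checkpointing_steps` is typically honored (the helper name and folder handling here are illustrative, not copied from the example scripts), the full training state is saved with `Accelerator.save_state` into `step_{n}` or `epoch_{n}` folders:

```python
from accelerate import Accelerator


def maybe_save_checkpoint(accelerator: Accelerator, checkpointing_steps, completed_steps: int, epoch: int):
    # Save the full training state following the step_{n} / epoch_{n} folder convention.
    if isinstance(checkpointing_steps, int) and completed_steps % checkpointing_steps == 0:
        accelerator.save_state(f"step_{completed_steps}")
    elif checkpointing_steps == "epoch":
        accelerator.save_state(f"epoch_{epoch}")
```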
accelerate/examples/README.md/0
{ "file_path": "accelerate/examples/README.md", "repo_id": "accelerate", "token_count": 4684 }
3
{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "overlap_comm": true, "contiguous_gradients": true, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "sub_group_size": 1e9, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
accelerate/examples/deepspeed_config_templates/zero_stage3_config.json/0
{ "file_path": "accelerate/examples/deepspeed_config_templates/zero_stage3_config.json", "repo_id": "accelerate", "token_count": 657 }
4
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from manim import * class Stage5(Scene): def construct(self): # The dataset items colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"] fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) columns = [ VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0) for j in range(4) ] dataset_recs = VGroup(*columns).arrange(UP, buff=0) dataset_text = Text("Dataset", font_size=24) dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) dataset.move_to([-2,0,0]) self.add(dataset) code = Code( code="# We enable this by default\naccelerator = Accelerator()\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...", tab_width=4, background="window", language="Python", font="Monospace", font_size=14, corner_radius=.2, insert_line_no=False, line_spacing=.75, style=Code.styles_list[1], ) code.move_to([-3.5, 2.5, 0]) self.add(code) # The dataloader itself sampler_1 = Group( Rectangle(color="blue", height=1, width=1), Text("Sampler GPU 1", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_2 = Group( Rectangle(color="blue", height=1, width=1), Text("Sampler GPU 2", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_3 = Group( Rectangle(color="blue", height=1, width=1), Text("Sampler GPU 3", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_4 = Group( Rectangle(color="blue", height=1, width=1), Text("Sampler GPU 4", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_1.move_to([2,2,0]) sampler_2.move_to([2,.5,0]) sampler_3.move_to([2,-1.,0]) sampler_4.move_to([2,-2.5,0]) self.add(sampler_1, sampler_2, sampler_3, sampler_4) samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]] gpu_1 = Group( Rectangle(color="white", height=1, width=1), Text("Output GPU 1", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0]) gpu_2 = Group( Rectangle(color="white", height=1, width=1), Text("Output GPU 2", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0]) gpu_3 = Group( Rectangle(color="white", height=1, width=1), Text("Output GPU 3", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0]) gpu_4 = Group( Rectangle(color="white", height=1, width=1), Text("Output GPU 4", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0]) gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]] self.add(gpu_1, gpu_2, gpu_3, gpu_4) # Animate their existence self.play( Create(gpu_1[0], run_time=1), Create(gpu_2[0], run_time=1), Create(gpu_3[0], run_time=1), Create(gpu_4[0], run_time=1), Create(dataset_recs, run_time=1), Create(sampler_1[0], run_time=1), Create(sampler_2[0], run_time=1), Create(sampler_3[0], run_time=1), Create(sampler_4[0], run_time=1), ) first_animations = [] second_animations = [] colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", 
"GRAY_A"] current_color = colors[0] buff = 0 lr_buff = .25 old_target = None new_datasets = [] for i,row_data in enumerate(dataset_recs): new_row = [] current_color = colors[i] if i == 0: idx = -3 elif i == 1: idx = -2 elif i == 2: idx = -1 elif i == 3: idx = 0 for j,indiv_data in enumerate(row_data): dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7) dataset_target.move_to(indiv_data) dataset_target.generate_target() aligned_edge = ORIGIN if j % 8 == 0: aligned_edge = LEFT dataset_target.target.next_to( samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN, ) dataset_target.target.set_x(dataset_target.target.get_x()) elif j % 4 == 0: old_target = dataset_target.target dataset_target.target.next_to( samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN, ) dataset_target.target.set_x(dataset_target.target.get_x()) dataset_target.target.set_y(dataset_target.target.get_y()-.25) else: dataset_target.target.next_to( old_target, direction=RIGHT, buff=0.02, ) old_target = dataset_target.target new_row.append(dataset_target) first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color)) second_animations.append(MoveToTarget(dataset_target, run_time=1.5)) new_datasets.append(new_row) step_1 = MarkupText( f"Since we splice the dataset between each GPU,\nthe models weights can be averaged during `backward()`\nActing as though we did one giant epoch\nvery quickly.", font_size=18 ) step_1.move_to([-2.5, -2, 0]) self.play( Write(step_1, run_time=3), ) self.play( *first_animations, ) self.play(*second_animations) self.wait(duration=.5) move_animation = [] import random for i,row in enumerate(new_datasets): # row = [row[k] for k in random.sample(range(8), 8)] current_color = colors[i] if i == 0: idx = -3 elif i == 1: idx = -2 elif i == 2: idx = -1 elif i == 3: idx = 0 for j,indiv_data in enumerate(row): indiv_data.generate_target() aligned_edge = ORIGIN if j % 8 == 0: aligned_edge = LEFT indiv_data.target.next_to( gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN, ) indiv_data.target.set_x(indiv_data.target.get_x()) elif j % 4 == 0: indiv_data.target.next_to( gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN, ) indiv_data.target.set_x(indiv_data.target.get_x()) indiv_data.target.set_y(indiv_data.target.get_y()-.25) else: indiv_data.target.next_to( old_target, direction=RIGHT, buff=0.02, ) old_target = indiv_data.target move_animation.append(MoveToTarget(indiv_data, run_time=1.5)) self.play(*move_animation) self.wait()
accelerate/manim_animations/dataloaders/stage_5.py/0
{ "file_path": "accelerate/manim_animations/dataloaders/stage_5.py", "repo_id": "accelerate", "token_count": 4515 }
5
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_mlu_available, is_musa_available, is_npu_available, is_sdaa_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter description = "Create a default config file for Accelerate with only a few flags set." def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False): """ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also set CPU if it is a CPU-only machine. Args: mixed_precision (`str`, *optional*, defaults to "no"): Mixed Precision to use. Should be one of "no", "fp16", or "bf16" save_location (`str`, *optional*, defaults to `default_json_config_file`): Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`. use_xpu (`bool`, *optional*, defaults to `False`): Whether to use XPU if available. """ path = Path(save_location) path.parent.mkdir(parents=True, exist_ok=True) if path.exists(): print( f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." ) return False mixed_precision = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. 
Received {mixed_precision}" ) config = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if is_mlu_available(): num_mlus = torch.mlu.device_count() config["num_processes"] = num_mlus config["use_cpu"] = False if num_mlus > 1: config["distributed_type"] = "MULTI_MLU" else: config["distributed_type"] = "NO" if is_sdaa_available(): num_sdaas = torch.sdaa.device_count() config["num_processes"] = num_sdaas config["use_cpu"] = False if num_sdaas > 1: config["distributed_type"] = "MULTI_SDAA" else: config["distributed_type"] = "NO" elif is_musa_available(): num_musas = torch.musa.device_count() config["num_processes"] = num_musas config["use_cpu"] = False if num_musas > 1: config["distributed_type"] = "MULTI_MUSA" else: config["distributed_type"] = "NO" elif torch.cuda.is_available(): num_gpus = torch.cuda.device_count() config["num_processes"] = num_gpus config["use_cpu"] = False if num_gpus > 1: config["distributed_type"] = "MULTI_GPU" else: config["distributed_type"] = "NO" elif is_xpu_available() and use_xpu: num_xpus = torch.xpu.device_count() config["num_processes"] = num_xpus config["use_cpu"] = False if num_xpus > 1: config["distributed_type"] = "MULTI_XPU" else: config["distributed_type"] = "NO" elif is_npu_available(): num_npus = torch.npu.device_count() config["num_processes"] = num_npus config["use_cpu"] = False if num_npus > 1: config["distributed_type"] = "MULTI_NPU" else: config["distributed_type"] = "NO" else: num_xpus = 0 config["use_cpu"] = True config["num_processes"] = 1 config["distributed_type"] = "NO" config["debug"] = False config["enable_cpu_affinity"] = False config = ClusterConfig(**config) config.to_json_file(path) return path def default_command_parser(parser, parents): parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter) parser.add_argument( "--config_file", default=default_json_config_file, help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ), dest="save_location", ) parser.add_argument( "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", ) parser.set_defaults(func=default_config_command) return parser def default_config_command(args): config_file = write_basic_config(args.mixed_precision, args.save_location) if config_file: print(f"accelerate configuration saved at {config_file}")
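For reference, the same helper can be called programmatically (for example from a notebook) instead of through the `accelerate config default` command; a minimal sketch, assuming the `accelerate.utils` re-export:

```python
from accelerate.utils import write_basic_config

# Write a bf16 single-machine config to a custom location and point
# `accelerate launch --config_file ...` at the returned path.
config_path = write_basic_config(mixed_precision="bf16", save_location="./accelerate_config.json")
print(config_path)
```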
accelerate/src/accelerate/commands/config/default.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/default.py", "repo_id": "accelerate", "token_count": 2440 }
6
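For reference, the config utility stored in the row above can also be called programmatically. A minimal usage sketch, assuming `accelerate` is installed; the save path below is made up for illustration:

from accelerate.commands.config.default import write_basic_config

# Writes a basic single-machine config (GPU/MLU/NPU/XPU or CPU is auto-detected).
# Returns the path on success, or False if a config already exists at that location.
config_path = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_default_config.yaml")
if config_path:
    print(f"Saved default config to {config_path}")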
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import math from contextlib import suppress from typing import Callable, List, Optional, Union import torch from packaging import version from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler from .logging import get_logger from .state import DistributedType, GradientState, PartialState, is_torch_xla_available from .utils import ( RNGType, broadcast, broadcast_object_list, compare_versions, concatenate, find_batch_size, get_data_structure, initialize_tensors, is_torch_version, is_torchdata_stateful_dataloader_available, send_to_device, slice_tensors, synchronize_rng_states, ) logger = get_logger(__name__) # kwargs of the DataLoader in min version 2.0 _PYTORCH_DATALOADER_KWARGS = { "batch_size": 1, "shuffle": False, "sampler": None, "batch_sampler": None, "num_workers": 0, "collate_fn": None, "pin_memory": False, "drop_last": False, "timeout": 0, "worker_init_fn": None, "multiprocessing_context": None, "generator": None, "prefetch_factor": 2, "persistent_workers": False, "pin_memory_device": "", } # kwargs added after by version _PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {"2.6.0": {"in_order": True}} for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items(): if is_torch_version(">=", v): _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs) class SeedableRandomSampler(RandomSampler): """ Same as a random sampler, except that in `__iter__` a seed can be used. Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed and be fully reproducable on multiple iterations. If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on (stored in `self.epoch`). """ def __init__(self, *args, **kwargs): data_seed = kwargs.pop("data_seed", None) super().__init__(*args, **kwargs) self.initial_seed = data_seed if data_seed is not None else torch.random.initial_seed() self.epoch = 0 def __iter__(self): if self.generator is None: self.generator = torch.Generator(device=torch.get_default_device()) self.generator.manual_seed(self.initial_seed) # Allow `self.epoch` to modify the seed of the generator seed = self.epoch + self.initial_seed # print("Setting seed at epoch", self.epoch, seed) self.generator.manual_seed(seed) yield from super().__iter__() self.set_epoch(self.epoch + 1) def set_epoch(self, epoch: int): "Sets the current iteration of the sampler." self.epoch = epoch class BatchSamplerShard(BatchSampler): """ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will always yield a number of batches that is a round multiple of `num_processes` and that all have the same size. 
Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would be too small / not present on all processes or loop with indices from the beginning. Args: batch_sampler (`torch.utils.data.sampler.BatchSampler`): The batch sampler to split in several shards. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. split_batches (`bool`, *optional*, defaults to `False`): Whether the shards should be created by splitting a batch to give a piece of it on each process, or by yielding different full batches on each process. On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in: - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if this argument is set to `False`. - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]` then `[6, 7]` if this argument is set to `True`. even_batches (`bool`, *optional*, defaults to `True`): Whether or not to loop back at the beginning of the sampler when the number of samples is not a round multiple of (original batch size / number of processes). <Tip warning={true}> `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` equal to `False` </Tip>""" def __init__( self, batch_sampler: BatchSampler, num_processes: int = 1, process_index: int = 0, split_batches: bool = False, even_batches: bool = True, ): if split_batches and batch_sampler.batch_size % num_processes != 0: raise ValueError( f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) self.batch_sampler = batch_sampler self.num_processes = num_processes self.process_index = process_index self.split_batches = split_batches self.even_batches = even_batches self.batch_size = getattr(batch_sampler, "batch_size", None) self.drop_last = getattr(batch_sampler, "drop_last", False) if self.batch_size is None and self.even_batches: raise ValueError( "You need to use `even_batches=False` when the batch sampler has no batch size. If you " "are not calling this method directly, set `accelerator.even_batches=False` instead." ) @property def total_length(self): return len(self.batch_sampler) def __len__(self): if self.split_batches: # Split batches does not change the length of the batch sampler return len(self.batch_sampler) if len(self.batch_sampler) % self.num_processes == 0: # If the length is a round multiple of the number of processes, it's easy. return len(self.batch_sampler) // self.num_processes length = len(self.batch_sampler) // self.num_processes if self.drop_last: # Same if we drop the remainder. return length elif self.even_batches: # When we even batches we always get +1 return length + 1 else: # Otherwise it depends on the process index. 
return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length def __iter__(self): return self._iter_with_split() if self.split_batches else self._iter_with_no_split() def _iter_with_split(self): initial_data = [] batch_length = self.batch_sampler.batch_size // self.num_processes for idx, batch in enumerate(self.batch_sampler): if idx == 0: initial_data = batch if len(batch) == self.batch_size: # If the batch is full, we yield the part of it this process is responsible of. yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] # If drop_last is True of the last batch was full, iteration is over, otherwise... if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size: if not self.even_batches: if len(batch) > batch_length * self.process_index: yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] else: # For degenerate cases where the dataset has less than num_process * batch_size samples while len(initial_data) < self.batch_size: initial_data += initial_data batch = batch + initial_data yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] def _iter_with_no_split(self): initial_data = [] batch_to_yield = [] for idx, batch in enumerate(self.batch_sampler): # We gather the initial indices in case we need to circle back at the end. if not self.drop_last and idx < self.num_processes: initial_data += batch # We identify the batch to yield but wait until we ar sure every process gets a full batch before actually # yielding it. if idx % self.num_processes == self.process_index: batch_to_yield = batch if idx % self.num_processes == self.num_processes - 1 and ( self.batch_size is None or len(batch) == self.batch_size ): yield batch_to_yield batch_to_yield = [] # If drop_last is True, iteration is over, otherwise... if not self.drop_last and len(initial_data) > 0: if not self.even_batches: if len(batch_to_yield) > 0: yield batch_to_yield else: # ... we yield the complete batch we had saved before if it has the proper length if len(batch_to_yield) == self.batch_size: yield batch_to_yield # For degenerate cases where the dataset has less than num_process * batch_size samples while len(initial_data) < self.num_processes * self.batch_size: initial_data += initial_data # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next if len(batch) == self.batch_size: batch = [] idx += 1 # Make sure we yield a multiple of self.num_processes batches cycle_index = 0 while idx % self.num_processes != 0 or len(batch) > 0: end_index = cycle_index + self.batch_size - len(batch) batch += initial_data[cycle_index:end_index] if idx % self.num_processes == self.process_index: yield batch cycle_index = end_index batch = [] idx += 1 class IterableDatasetShard(IterableDataset): """ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (depending of the value of `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would be too small or loop with indices from the beginning. Args: dataset (`torch.utils.data.dataset.IterableDataset`): The batch sampler to split in several shards. 
batch_size (`int`, *optional*, defaults to 1): The size of the batches per shard (if `split_batches=False`) or the size of the batches (if `split_batches=True`). drop_last (`bool`, *optional*, defaults to `False`): Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the beginning. num_processes (`int`, *optional*, defaults to 1): The number of processes running concurrently. process_index (`int`, *optional*, defaults to 0): The index of the current process. split_batches (`bool`, *optional*, defaults to `False`): Whether the shards should be created by splitting a batch to give a piece of it on each process, or by yielding different full batches on each process. On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in: - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this argument is set to `False`. - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if this argument is set to `True`. """ def __init__( self, dataset: IterableDataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, split_batches: bool = False, ): if split_batches and batch_size > 1 and batch_size % num_processes != 0: raise ValueError( f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.split_batches = split_batches def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): # We will just raise the downstream error if the underlying dataset is not sized if self.drop_last: return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size def __iter__(self): if ( not hasattr(self.dataset, "set_epoch") and hasattr(self.dataset, "generator") and isinstance(self.dataset.generator, torch.Generator) ): self.dataset.generator.manual_seed(self.epoch) real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes) process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size) first_batch = None current_batch = [] for element in self.dataset: current_batch.append(element) # Wait to have a full batch before yielding elements. if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. 
if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] class DataLoaderStateMixin: """ Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other useful information that might be needed. **Available attributes:** - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total batch size <Tip warning={true}> Inheriters of this class should ensure that the class creates a `GradientState()` instance, stored in `self.gradient_state`. </Tip> """ def __init_subclass__(cls, **kwargs): cls.end_of_dataloader = False cls.remainder = -1 def reset(self): self.end_of_dataloader = False self.remainder = -1 def begin(self): "Prepares the gradient state for the current dataloader" self.reset() with suppress(Exception): if not self._drop_last: length = getattr(self.dataset, "total_dataset_length", len(self.dataset)) self.remainder = length % self.total_batch_size self.gradient_state._add_dataloader(self) def end(self): "Cleans up the gradient state after exiting the dataloader" self.gradient_state._remove_dataloader(self) class DataLoaderAdapter: """ A class which wraps around a PyTorch `DataLoader` (or variants of it) to be used with the `Accelerator`. For compatability reasons, this class inherits from the class it wraps around, so it can be used as a drop-in. """ def __init__(self, dataset, use_stateful_dataloader=False, batch_sampler=None, **kwargs): self.use_stateful_dataloader = use_stateful_dataloader if is_torchdata_stateful_dataloader_available(): from torchdata.stateful_dataloader import StatefulDataLoader if use_stateful_dataloader and not is_torchdata_stateful_dataloader_available(): raise ImportError( "StatefulDataLoader is not available. Please install torchdata version 0.8.0 or higher to use it." ) if use_stateful_dataloader: torchdata_version = version.parse(importlib.metadata.version("torchdata")) if ( "in_order" in kwargs and compare_versions(torchdata_version, "<", "0.11") and is_torch_version(">=", "2.6.0") ): kwargs.pop("in_order") self.base_dataloader = StatefulDataLoader(dataset, batch_sampler=batch_sampler, **kwargs) else: self.base_dataloader = DataLoader(dataset, batch_sampler=batch_sampler, **kwargs) if hasattr(self.base_dataloader, "state_dict"): self.dl_state_dict = self.base_dataloader.state_dict() def __getattr__(self, name): # Avoid infinite recursion if we try to access a nonexistent base_dataloader attribute. if name == "base_dataloader": raise AttributeError() # Delegate attribute access to the internal dataloader return getattr(self.base_dataloader, name) def state_dict(self): return self.dl_state_dict def load_state_dict(self, state_dict): self.base_dataloader.load_state_dict(state_dict) @property def __class__(self): """ In order to maintain backwards compatability with other code, we need to ensure `isinstance(obj, DataLoader)` returs true. This is because some downstream code assumes that the `DataLoader` is the base class of the object. """ return self.base_dataloader.__class__ def __len__(self): return len(self.base_dataloader) def adjust_state_dict_for_prefetch(self): """ Adjusts the state dict for prefetching. 
Natively, this will adjust all of the iters yielded keys in `self.dl_state_dict` by a factor of `num_processes - 1`, however if a custom correction is needed, this can be overridden. This should modify `self.dl_state_dict` directly """ # The state dict will be off by a factor of `n-1` batch too many during DDP, # so we need to adjust it here if PartialState().distributed_type != DistributedType.NO: factor = PartialState().num_processes - 1 if self.dl_state_dict["_sampler_iter_yielded"] > 0: self.dl_state_dict["_sampler_iter_yielded"] -= factor if self.dl_state_dict["_num_yielded"] > 0: self.dl_state_dict["_num_yielded"] -= factor if self.dl_state_dict["_index_sampler_state"] is not None: if ( "samples_yielded" in self.dl_state_dict["_index_sampler_state"] and self.dl_state_dict["_index_sampler_state"]["samples_yielded"] > 0 ): self.dl_state_dict["_index_sampler_state"]["samples_yielded"] -= self.batch_size * factor def _update_state_dict(self): # The state_dict of the underlying base_dataloader may be ahead of what is currently being yielded. # E.g. the implementation of DataLoaderShard involves having an underlying iterator 1 element ahead of # what it wants to yield. # # _update_state_dict is called to snapshot the state_dict that would properly recover the DataLoaderAdapter. if hasattr(self.base_dataloader, "state_dict"): self.dl_state_dict = self.base_dataloader.state_dict() # Potentially modify the state_dict to adjust for prefetching self.adjust_state_dict_for_prefetch() # Then tag if we are at the end of the dataloader self.dl_state_dict["_iterator_finished"] = self.end_of_dataloader class DataLoaderShard(DataLoaderAdapter, DataLoaderStateMixin): """ Subclass of `DataLoaderAdapter` that will deal with device placement and current distributed setup. Args: dataset (`torch.utils.data.dataset.Dataset`): The dataset to use to build this dataloader. device (`torch.device`, *optional*): If passed, the device to put all batches on. rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: an optional `torch.Generator` synchronized_generator (`torch.Generator`, *optional*): A random number generator to keep synchronized across processes. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning. use_stateful_dataloader (`bool`, *optional*, defaults to `False`): Whether to have this class adapt `StatefulDataLoader` from `torchdata` instead of the regular `DataLoader`. **kwargs (additional keyword arguments, *optional*): All other keyword arguments to pass to the regular `DataLoader` initialization. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
""" def __init__( self, dataset, device=None, rng_types=None, synchronized_generator=None, skip_batches=0, use_stateful_dataloader=False, _drop_last: bool = False, _non_blocking: bool = False, torch_device_mesh=None, **kwargs, ): super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) self.device = device self.rng_types = rng_types self.synchronized_generator = synchronized_generator self.skip_batches = skip_batches self.gradient_state = GradientState() self._drop_last = _drop_last self._non_blocking = _non_blocking self.iteration = 0 def __iter__(self): if self.rng_types is not None: synchronize_rng_states(self.rng_types, self.synchronized_generator) self.begin() self.set_epoch(self.iteration) dataloader_iter = self.base_dataloader.__iter__() # We iterate one batch ahead to check when we are at the end try: current_batch = next(dataloader_iter) except StopIteration: yield batch_index = 0 while True: try: # But we still move it to the device so it is done before `StopIteration` is reached if self.device is not None: current_batch = send_to_device(current_batch, self.device, non_blocking=self._non_blocking) self._update_state_dict() next_batch = next(dataloader_iter) if batch_index >= self.skip_batches: yield current_batch batch_index += 1 current_batch = next_batch except StopIteration: self.end_of_dataloader = True self._update_state_dict() if batch_index >= self.skip_batches: yield current_batch break self.iteration += 1 self.end() def __reduce__(self): """ Define the `__reduce__` method to ensure a `DataLoaderShard` can be pickled and unpickled. This needs to be explicitly defined since default pickling behavior is broken by `DataLoaderAdapter` messing with its `__class__` member. """ args = super().__reduce__() return (DataLoaderShard, *args[1:]) def set_epoch(self, epoch: int): # In case it is manually passed in, the user can set it to what they like if self.iteration != epoch: self.iteration = epoch if hasattr(self.batch_sampler, "set_epoch"): self.batch_sampler.set_epoch(epoch) if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(epoch) # We support if a custom `Dataset` implementation has `set_epoch` # or in general HF datasets `Datasets` elif hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) @property def total_batch_size(self): batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler return ( batch_sampler.batch_size if getattr(batch_sampler, "split_batches", False) else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1)) ) @property def total_dataset_length(self): if hasattr(self.dataset, "total_length"): return self.dataset.total_length else: return len(self.dataset) def get_sampler(self): return get_sampler(self) def set_sampler(self, sampler): sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler) if sampler_is_batch_sampler: self.sampler.sampler = sampler else: self.batch_sampler.sampler = sampler if hasattr(self.batch_sampler, "batch_sampler"): self.batch_sampler.batch_sampler.sampler = sampler if is_torch_xla_available(): import torch_xla.distributed.parallel_loader as xpl class MpDeviceLoaderWrapper(xpl.MpDeviceLoader): """ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size. XLA preloading threads will all call DataLoaderShard's __iter__(). 
Remove rng_types from DataLoaderShard to prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main thread only. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. """ def __init__(self, dataloader: DataLoaderShard, device: torch.device): super().__init__(dataloader, device) self._rng_types = self._loader.rng_types self._loader.rng_types = None self.device = device def __iter__(self): if self._rng_types is not None: synchronize_rng_states(self._rng_types, self._loader.synchronized_generator) return super().__iter__() def set_epoch(self, epoch: int): if hasattr(self.dataloader, "set_epoch"): self.dataloader.set_epoch(epoch) @property def total_batch_size(self): return self._loader.total_batch_size @property def total_dataset_length(self): return self._loader.total_dataset_length @property def batch_sampler(self): return self._loader.batch_sampler @property def dataloader(self): return self._loader class DataLoaderDispatcher(DataLoaderAdapter, DataLoaderStateMixin): """ Subclass of `DataLoaderAdapter` that will iterate and preprocess on process 0 only, then dispatch on each process their part of the batch. Args: split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `batch_size`. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning of an iteration. use_stateful_dataloader (`bool`, *optional*, defaults to `False`): Whether to have this class adapt `StatefulDataLoader` from `torchdata` instead of the regular `DataLoader`. **Available attributes:** - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total number of processes - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
""" def __init__( self, dataset, split_batches: bool = False, skip_batches=0, use_stateful_dataloader=False, _drop_last: bool = False, _non_blocking: bool = False, slice_fn=None, torch_device_mesh=None, **kwargs, ): shuffle = False from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe # We need to save the shuffling state of the DataPipe if isinstance(dataset, ShufflerIterDataPipe): shuffle = dataset._shuffle_enabled super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) self.split_batches = split_batches if shuffle: torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) self.gradient_state = GradientState() self.state = PartialState() self._drop_last = _drop_last self._non_blocking = _non_blocking self.skip_batches = skip_batches self.torch_device_mesh = torch_device_mesh self.slice_fn = slice_tensors if slice_fn is None else slice_fn self.iteration = 0 # if a device mesh is provided extract each dimension (dp, fsdp, tp) # device mesh may hold any number of dimensions, however, # below code is for targetted support for dp, fsdp and tp # device mesh will be used only if there is tp involved # or any multi-dimensional parallelism involving tp # (dp, tp) (fsdp, tp) (dp, fsdp, tp) # otherwise the default behavour not using device mesh should be sufficient # since multi dimensional parallelism devoid of tp would anyway need # different batches for each process irrespective of dp or fsdp self.submesh_tp = None self.submesh_dp = None self.submesh_fsdp = None if self.torch_device_mesh and "tp" in self.torch_device_mesh.mesh_dim_names: self.submesh_tp = self.torch_device_mesh["tp"] if "dp" in self.torch_device_mesh.mesh_dim_names: self.submesh_dp = self.torch_device_mesh["dp"] if "fsdp" in self.torch_device_mesh.mesh_dim_names: self.submesh_fsdp = self.torch_device_mesh["fsdp"] if self.submesh_tp and (self.submesh_dp or self.submesh_fsdp): raise ValueError("TP + (DP/FSDP) is not yet supported in dispatch mode") def _fetch_batches(self, iterator): batches, batch = None, None # On process 0, we gather the batch to dispatch. if self.state.process_index == 0: # Procedure to support TP only is simpler # since we want to dispatch the same batch of samples across all ranks # this removes complexity of handling multiple tp rank groups when TP + DP # combination is involved. try: # for TP case avoid using split_batches # since it would mean that the dataloader should be spilling out # duplicates of batches. if self.split_batches: # One batch of the main iterator is dispatched and split. if self.submesh_tp: logger.warning( "Use of split_batches for TP would need the dataloader to produce duplicate batches," "otherwise, use dispatch_batches=True instead." ) self._update_state_dict() batch = next(iterator) else: # num_processes batches of the main iterator are concatenated then dispatched and split. # We add the batches one by one so we have the remainder available when drop_last=False. batches = [] if self.submesh_tp: # when tp, extract single batch and then replicate self._update_state_dict() batch = next(iterator) batches = [batch] * self.state.num_processes else: for _ in range(self.state.num_processes): self._update_state_dict() batches.append(next(iterator)) try: batch = concatenate(batches, dim=0) except RuntimeError as e: raise RuntimeError( "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`." 
"either pass `dispatch_batches=False` and have each process fetch its own batch " " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and " "slice it into `num_processes` batches for each process." ) from e # In both cases, we need to get the structure of the batch that we will broadcast on other # processes to initialize the tensors with the right shape. # data_structure, stop_iteration batch_info = [get_data_structure(batch), False] except StopIteration: batch_info = [None, True] else: batch_info = [None, self._stop_iteration] # This is inplace, so after this instruction, every process has the same `batch_info` as process 0. broadcast_object_list(batch_info) self._stop_iteration = batch_info[1] if self._stop_iteration: # If drop_last is False and split_batches is False, we may have a remainder to take care of. if not self.split_batches and not self._drop_last: if self.state.process_index == 0 and len(batches) > 0: batch = concatenate(batches, dim=0) batch_info = [get_data_structure(batch), False] else: batch_info = [None, True] broadcast_object_list(batch_info) return batch, batch_info def __iter__(self): self.begin() self.set_epoch(self.iteration) main_iterator = None if is_torch_version(">=", "2.0.1"): # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts # shared seed to all dist processes. Thus, we need to create iterator for all dist processes. # But, we only iterate through the DataLoader on process 0. main_iterator = self.base_dataloader.__iter__() elif self.state.process_index == 0: main_iterator = self.base_dataloader.__iter__() stop_iteration = False self._stop_iteration = False first_batch = None next_batch, next_batch_info = self._fetch_batches(main_iterator) batch_index = 0 while not stop_iteration: batch, batch_info = next_batch, next_batch_info if self.state.process_index != 0: # Initialize tensors on other processes than process 0. batch = initialize_tensors(batch_info[0]) batch = send_to_device(batch, self.state.device, non_blocking=self._non_blocking) # Broadcast the batch before splitting it. batch = broadcast(batch, from_process=0) if not self._drop_last and first_batch is None: # We keep at least num processes elements of the first batch to be able to complete the last batch first_batch = self.slice_fn( batch, slice(0, self.state.num_processes), process_index=self.state.process_index, num_processes=self.state.num_processes, ) if batch is None: raise ValueError( f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration." ) observed_batch_size = find_batch_size(batch) batch_size = observed_batch_size // self.state.num_processes stop_iteration = self._stop_iteration if not stop_iteration: # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in # the dataloader since the number of batches is a round multiple of the number of processes. next_batch, next_batch_info = self._fetch_batches(main_iterator) # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them. if self._stop_iteration and next_batch_info[0] is None: stop_iteration = True if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0: # If the last batch is not complete, let's add the first batch to it. batch = concatenate([batch, first_batch], dim=0) # Batch size computation above is wrong, it's off by 1 so we fix it. 
batch_size += 1 data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size) batch = self.slice_fn( batch, data_slice, process_index=self.state.process_index, num_processes=self.state.num_processes, ) if stop_iteration: self.end_of_dataloader = True self._update_state_dict() self.remainder = observed_batch_size if batch_index >= self.skip_batches: yield batch batch_index += 1 self.iteration += 1 self.end() def set_epoch(self, epoch: int): # In case it is manually passed in, the user can set it to what they like if self.iteration != epoch: self.iteration = epoch if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(epoch) elif hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __len__(self): whole_length = len(self.base_dataloader) if self.split_batches: return whole_length elif self._drop_last: return whole_length // self.state.num_processes else: return math.ceil(whole_length / self.state.num_processes) def __reduce__(self): """ Define the `__reduce__` method to ensure a `DataLoaderDispatcher` can be pickled and unpickled. This needs to be explicitly defined since default pickling behavior is broken by `DataLoaderAdapter` messing with its `__class__` member. """ args = super().__reduce__() return (DataLoaderDispatcher, *args[1:]) @property def total_batch_size(self): return ( self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes) ) @property def total_dataset_length(self): return len(self.dataset) def get_sampler(self): return get_sampler(self) def set_sampler(self, sampler): sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler) if sampler_is_batch_sampler: self.sampler.sampler = sampler else: self.batch_sampler.sampler = sampler if hasattr(self.batch_sampler, "batch_sampler"): self.batch_sampler.batch_sampler.sampler = sampler def get_sampler(dataloader): """ Get the sampler associated to the dataloader Args: dataloader (`torch.utils.data.dataloader.DataLoader`): The data loader to split across several devices. Returns: `torch.utils.data.Sampler`: The sampler associated to the dataloader """ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) if sampler_is_batch_sampler: sampler = getattr(dataloader.sampler, "sampler", None) else: sampler = getattr(dataloader.batch_sampler, "sampler", None) return sampler def prepare_data_loader( dataloader: DataLoader, device: Optional[torch.device] = None, num_processes: Optional[int] = None, process_index: Optional[int] = None, split_batches: bool = False, put_on_device: bool = False, rng_types: Optional[List[Union[str, RNGType]]] = None, dispatch_batches: Optional[bool] = None, even_batches: bool = True, slice_fn_for_dispatch: Optional[Callable] = None, use_seedable_sampler: bool = False, data_seed: Optional[int] = None, non_blocking: bool = False, use_stateful_dataloader: bool = False, torch_device_mesh=None, ) -> DataLoader: """ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only. Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration at the first batch that would be too small / not present on all processes or loop with indices from the beginning. Args: dataloader (`torch.utils.data.dataloader.DataLoader`): The data loader to split across several devices. device (`torch.device`): The target device for the returned `DataLoader`. 
num_processes (`int`, *optional*): The number of processes running concurrently. Will default to the value given by [`~state.PartialState`]. process_index (`int`, *optional*): The index of the current process. Will default to the value given by [`~state.PartialState`]. split_batches (`bool`, *optional*, defaults to `False`): Whether the resulting `DataLoader` should split the batches of the original data loader across devices or yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of `batch_size`. put_on_device (`bool`, *optional*, defaults to `False`): Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or dictionaries of tensors). rng_types (list of `str` or [`~utils.RNGType`]): The list of random number generators to synchronize at the beginning of each iteration. Should be one or several of: - `"torch"`: the base torch random number generator - `"cuda"`: the CUDA random number generator (GPU only) - `"xla"`: the XLA random number generator (TPU only) - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. dispatch_batches (`bool`, *optional*): If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` when the underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, *optional*, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. slice_fn_for_dispatch (`Callable`, *optional*`): If passed, this function will be used to slice tensors across `num_processes`. Will default to [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be ignored otherwise. use_seedable_sampler (`bool`, *optional*, defaults to `False`): Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better reproducability. Comes at a cost of potentially different performances due to different shuffling algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every `self.set_epoch` data_seed (`int`, *optional*, defaults to `None`): The seed to use for the underlying generator when using `use_seedable_sampler`. If `None`, the generator will use the current default seed from torch. non_blocking (`bool`, *optional*, defaults to `False`): If set to `True`, dataloader will utilize non-blocking host-to-device transfers. If the dataloader has `pin_memory` set to `True`, this will help to increase overlap between data transfer and computations. use_stateful_dataloader (`bool`, *optional*, defaults to `False`): "If set to true, the dataloader prepared by the Accelerator will be backed by " "[torchdata.StatefulDataLoader](https://github.com/pytorch/data/tree/main/torchdata/stateful_dataloader). 
This requires `torchdata` version 0.8.0 or higher that supports StatefulDataLoader to be installed." torch_device_mesh (`torch.distributed.DeviceMesh`, *optional*, defaults to `None`): PyTorch device mesh. Returns: `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches <Tip warning={true}> `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` equal to `False` </Tip> """ if dispatch_batches is None: if not put_on_device: dispatch_batches = False else: dispatch_batches = isinstance(dataloader.dataset, IterableDataset) if dispatch_batches and not put_on_device: raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.") # Grab defaults from PartialState state = PartialState() if num_processes is None: num_processes = state.num_processes if process_index is None: process_index = state.process_index # when device mesh is used, specifically with TP # then there is need to update process_index and num_processes # to bring in the effect of generating same batch across TP ranks # and different batch across FSDP and DP ranks. # Example: # if device mesh is (dp,fsdp,tp) = (2, 2, 3) # ranks would range from 0...11 # from data angle ranks should look like 0 0 0 1 1 1 2 2 2 3 3 3 # processes with same ranks/ids would receive the same batch if torch_device_mesh: submesh_fsdp_size = 1 submesh_dp_size = 1 submesh_tp_size = 1 if "tp" in torch_device_mesh.mesh_dim_names: submesh_tp_size = torch_device_mesh["tp"].size() if "dp" in torch_device_mesh.mesh_dim_names: submesh_dp_size = torch_device_mesh["dp"].size() if "fsdp" in torch_device_mesh.mesh_dim_names: submesh_fsdp_size = torch_device_mesh["fsdp"].size() process_index = process_index // submesh_tp_size num_processes = submesh_fsdp_size * submesh_dp_size # Sanity check if split_batches: if dataloader.batch_size is not None: batch_size_for_check = dataloader.batch_size else: # For custom batch_sampler if hasattr(dataloader.batch_sampler, "batch_size"): batch_size_for_check = dataloader.batch_sampler.batch_size else: raise ValueError( "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed " "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. " "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` " f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set." ) if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0: raise ValueError( f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) " f"needs to be a round multiple of the number of processes ({num_processes})." ) new_dataset = dataloader.dataset # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) synchronized_generator = None sampler = get_sampler(dataloader) if isinstance(sampler, RandomSampler) and use_seedable_sampler: # When iterating through the dataloader during distributed processes # we want to ensure that on each process we are iterating through the same # samples in the same order if a seed is set. This requires a tweak # to the `torch.utils.data.RandomSampler` class (if used). 
sampler = SeedableRandomSampler( data_source=sampler.data_source, replacement=sampler.replacement, num_samples=sampler._num_samples, generator=getattr(sampler, "generator", torch.Generator(device=torch.get_default_device())), data_seed=data_seed, ) if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA: # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled. generator = torch.Generator(device=torch.get_default_device()).manual_seed(42) dataloader.generator = generator dataloader.sampler.generator = generator # No change if no multiprocess if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches: if isinstance(new_dataset, IterableDataset): if getattr(dataloader.dataset, "generator", None) is not None: synchronized_generator = dataloader.dataset.generator new_dataset = IterableDatasetShard( new_dataset, batch_size=dataloader.batch_size, drop_last=dataloader.drop_last, num_processes=num_processes, process_index=process_index, split_batches=split_batches, ) else: if not use_seedable_sampler and hasattr(sampler, "generator"): if sampler.generator is None: sampler.generator = torch.Generator(device=torch.get_default_device()) synchronized_generator = sampler.generator batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler new_batch_sampler = BatchSamplerShard( batch_sampler, num_processes=num_processes, process_index=process_index, split_batches=split_batches, even_batches=even_batches, ) # We ignore all of those since they are all dealt with by our new_batch_sampler ignore_kwargs = [ "batch_size", "shuffle", "sampler", "batch_sampler", "drop_last", ] if rng_types is not None and synchronized_generator is None and "generator" in rng_types: rng_types.remove("generator") kwargs = { k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS if k not in ignore_kwargs } # Need to provide batch_size as batch_sampler is None for Iterable dataset if new_batch_sampler is None: kwargs["drop_last"] = dataloader.drop_last kwargs["batch_size"] = ( dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size ) if dispatch_batches: kwargs.pop("generator") dataloader = DataLoaderDispatcher( new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, slice_fn=slice_fn_for_dispatch, use_stateful_dataloader=use_stateful_dataloader, torch_device_mesh=torch_device_mesh, **kwargs, ) elif sampler_is_batch_sampler: dataloader = DataLoaderShard( new_dataset, device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, sampler=new_batch_sampler, batch_size=dataloader.batch_size, rng_types=rng_types, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, synchronized_generator=synchronized_generator, use_stateful_dataloader=use_stateful_dataloader, **kwargs, ) else: dataloader = DataLoaderShard( new_dataset, device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, batch_sampler=new_batch_sampler, rng_types=rng_types, synchronized_generator=synchronized_generator, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, use_stateful_dataloader=use_stateful_dataloader, **kwargs, ) if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler: dataloader.set_sampler(sampler) if state.distributed_type == 
DistributedType.XLA: return MpDeviceLoaderWrapper(dataloader, device) return dataloader class SkipBatchSampler(BatchSampler): """ A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`. Should not be used if the original dataloader is a `StatefulDataLoader`. """ def __init__(self, batch_sampler, skip_batches=0): self.batch_sampler = batch_sampler self.skip_batches = skip_batches def __iter__(self): for index, samples in enumerate(self.batch_sampler): if index >= self.skip_batches: yield samples @property def total_length(self): return len(self.batch_sampler) def __len__(self): return len(self.batch_sampler) - self.skip_batches class SkipDataLoader(DataLoaderAdapter, DataLoaderStateMixin): """ Subclass of a PyTorch `DataLoader` that will skip the first batches. Generally it's preferable to use `skip_first_batches`/`torchdata.StatefulDataLoader` instead of this class. Args: dataset (`torch.utils.data.dataset.Dataset`): The dataset to use to build this dataloader. skip_batches (`int`, *optional*, defaults to 0): The number of batches to skip at the beginning. kwargs: All other keyword arguments to pass to the regular `DataLoader` initialization. """ def __init__(self, dataset, skip_batches=0, use_stateful_dataloader=False, **kwargs): super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs) self.skip_batches = skip_batches self.gradient_state = GradientState() def __iter__(self): self.begin() for index, batch in enumerate(self.base_dataloader.__iter__()): if index >= self.skip_batches: self._update_state_dict() yield batch self.end() def __len__(self): return len(self.base_dataloader) - self.skip_batches def __reduce__(self): """ Define the `__reduce__` method to ensure a `SkipDataLoader` can be pickled and unpickled. This needs to be explicitly defined since default pickling behavior is broken by `DataLoaderAdapter` messing with its `__class__` member. """ args = super().__reduce__() return (SkipDataLoader, *args[1:]) def skip_first_batches(dataloader, num_batches=0): """ Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. Should not be used if the original dataloader is a `StatefulDataLoader`. 
""" state = PartialState() if state.distributed_type == DistributedType.XLA: device = dataloader.device dataloader = dataloader.dataloader dataset = dataloader.dataset sampler_is_batch_sampler = False if isinstance(dataset, IterableDataset): new_batch_sampler = None else: sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches) # We ignore all of those since they are all dealt with by our new_batch_sampler ignore_kwargs = [ "batch_size", "shuffle", "sampler", "batch_sampler", "drop_last", ] kwargs = { k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS if k not in ignore_kwargs } # Need to provide batch_size as batch_sampler is None for Iterable dataset if new_batch_sampler is None: kwargs["drop_last"] = dataloader.drop_last kwargs["batch_size"] = dataloader.batch_size if isinstance(dataloader, DataLoaderDispatcher): if new_batch_sampler is None: # Need to manually skip batches in the dataloader kwargs["skip_batches"] = num_batches dataloader = DataLoaderDispatcher( dataset, split_batches=dataloader.split_batches, batch_sampler=new_batch_sampler, _drop_last=dataloader._drop_last, **kwargs, ) elif isinstance(dataloader, DataLoaderShard): if new_batch_sampler is None: # Need to manually skip batches in the dataloader kwargs["skip_batches"] = num_batches elif sampler_is_batch_sampler: kwargs["sampler"] = new_batch_sampler kwargs["batch_size"] = dataloader.batch_size else: kwargs["batch_sampler"] = new_batch_sampler dataloader = DataLoaderShard( dataset, device=dataloader.device, rng_types=dataloader.rng_types, synchronized_generator=dataloader.synchronized_generator, **kwargs, ) else: if new_batch_sampler is None: # Need to manually skip batches in the dataloader dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs) else: dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs) if state.distributed_type == DistributedType.XLA: dataloader = MpDeviceLoaderWrapper(dataloader, device) return dataloader
accelerate/src/accelerate/data_loader.py/0
{ "file_path": "accelerate/src/accelerate/data_loader.py", "repo_id": "accelerate", "token_count": 26992 }
7
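To make the sharding semantics of `BatchSamplerShard` from the `data_loader.py` row above concrete, here is a small sketch; the toy dataset of 16 indices and the two-process setup are chosen purely for illustration:

from torch.utils.data import BatchSampler, SequentialSampler
from accelerate.data_loader import BatchSamplerShard

indices = list(range(16))
batch_sampler = BatchSampler(SequentialSampler(indices), batch_size=4, drop_last=False)

# Each process wraps the same batch sampler with its own process_index.
# With split_batches=False, process 0 sees [0..3] and [8..11], process 1 sees [4..7] and [12..15].
for rank in range(2):
    shard = BatchSamplerShard(batch_sampler, num_processes=2, process_index=rank)
    print(f"process {rank}:", list(shard))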
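The same file also provides `skip_first_batches`, used when resuming training mid-epoch. A single-process sketch under assumed toy sizes (in real use the dataloader would typically come from `accelerator.prepare`):

import torch
from torch.utils.data import DataLoader
from accelerate.data_loader import skip_first_batches

dataloader = DataLoader(torch.arange(12), batch_size=4)  # 3 batches of 4
resumed = skip_first_batches(dataloader, num_batches=1)  # drop the batch already consumed

for batch in resumed:
    print(batch)  # tensor([4, 5, 6, 7]) then tensor([ 8,  9, 10, 11])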
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader, IterableDataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator, DataLoaderConfiguration, DistributedType
from accelerate.data_loader import DataLoaderDispatcher
from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device
from accelerate.utils import is_torch_xla_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


class ListHandler(logging.Handler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.logs = []

    def emit(self, record):
        self.logs.append(record)


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, split_batches=split_batches)
    accelerator = Accelerator(dataloader_config=dataloader_config)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {
        "ddp": [ddp_model, ddp_dataloader, torch_device],
        "no": [model, dataloader, accelerator.device],
    }, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
    class DummyIterableDataset(IterableDataset):
        def __init__(self, data):
            self.data = data

        def __len__(self):
            return len(self.data)

        def __iter__(self):
            yield from self.data

    iterable_dataset = DummyIterableDataset([n for n in range(30)])
    dataloader = DataLoader(iterable_dataset, batch_size=4)
    accelerator = Accelerator()
    prepared_dataloader = accelerator.prepare(dataloader)

    if accelerator.is_main_process:
        logger = logging.root.manager.loggerDict["accelerate.accelerator"]
        list_handler = ListHandler()
        logger.addHandler(list_handler)

    batches_for_metrics = []
    for batch in prepared_dataloader:
        batches_for_metrics.append(accelerator.gather_for_metrics(batch))

    assert torch.cat(batches_for_metrics).size(0) == 30

    if accelerator.is_main_process:
        assert len(list_handler.logs) == 0
        logger.removeHandler(list_handler)


def test_gather_for_metrics_with_iterable_dataset():
    class DummyIterableDataset(IterableDataset):
        def __init__(self, data):
            self.data = data

        def __len__(self):
            return len(self.data)

        def __iter__(self):
            yield from self.data

    iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30)))
    dataloader = DataLoader(iterable_dataset, batch_size=4)
    accelerator = Accelerator()
    prepared_dataloader = accelerator.prepare(dataloader)

    assert isinstance(prepared_dataloader, DataLoaderDispatcher)

    if accelerator.is_main_process:
        logger = logging.root.manager.loggerDict["accelerate.accelerator"]
        list_handler = ListHandler()
        logger.addHandler(list_handler)

    batches_for_metrics = []
    for batch in prepared_dataloader:
        batches_for_metrics.append(accelerator.gather_for_metrics(batch))

    assert torch.cat(batches_for_metrics).size(0) == 30

    if accelerator.is_main_process:
        assert len(list_handler.logs) == 0
        logger.removeHandler(list_handler)


def test_gather_for_metrics_drop_last():
    accelerator = Accelerator()
    per_device_batch_size = 5
    num_items = (10 * accelerator.num_processes) + 1
    dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True)
    dataloader = accelerator.prepare(dataloader)

    iterator = iter(dataloader)
    next(iterator)  # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0')
    batch = next(iterator)
    gathered_items = accelerator.gather_for_metrics(batch)

    # Should return a full set of complete batches from each GPU
    num_expected_items = per_device_batch_size * accelerator.num_processes
    assert gathered_items.size(0) == (
        num_expected_items
    ), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"


def main():
    dataloader_config = DataLoaderConfiguration(split_batches=False, dispatch_batches=False)
    accelerator = Accelerator(dataloader_config=dataloader_config)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # TorchXLA does not support batch dispatching. 'put_on_device' is always False for
    # TorchXLA, which can cause a value error in 'prepare_data_loader' function.
    dispatch_batches_options = [False] if accelerator.state.distributed_type == DistributedType.XLA else [True, False]

    # Temporarily close this test for TorchXLA due to the 'Cannot set version_counter for
    # inference tensor' error in inference mode. Reopen it after TorchXLA fixes this bug.
    # These are a bit slower so they should only be ran on the GPU or TPU
    if accelerator.device.type != "cpu" and not is_torch_xla_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in dispatch_batches_options:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
        print("test_gather_for_metrics_with_iterable_dataset")
        test_gather_for_metrics_with_iterable_dataset()
        print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
        test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()

    # MpDeviceLoader in TorchXLA is an asynchronous loader that preloads several batches into cache.
    # This can cause the 'end_of_dataloader' of DataLoaderStateMixin to be set earlier than intended.
    # Skip this test when TorchXLA is enabled.
    if accelerator.state.distributed_type != DistributedType.XLA:
        if accelerator.is_local_main_process:
            print("**Test torch metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in dispatch_batches_options:
                dataloader_config = DataLoaderConfiguration(
                    split_batches=split_batches, dispatch_batches=dispatch_batches
                )
                accelerator = Accelerator(dataloader_config=dataloader_config)
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
                test_torch_metrics(accelerator, 99)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test that `drop_last` is taken into account**")
    test_gather_for_metrics_drop_last()
    accelerator.end_training()
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
accelerate/src/accelerate/test_utils/scripts/external_deps/test_metrics.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_metrics.py", "repo_id": "accelerate", "token_count": 4714 }
8
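The row above stores accelerate's metric test, whose core pattern is `Accelerator.gather_for_metrics`: it gathers predictions and references from every process and drops the duplicated samples the distributed sampler adds for padding, so each validation example is counted once. As a hedged illustration of that pattern only (this is not part of the dataset row; `model` and `eval_dataloader` are placeholders you would supply yourself):

import evaluate
import torch
from accelerate import Accelerator


def evaluate_mrpc(model, eval_dataloader):
    # Minimal sketch of the evaluation loop exercised by the test above
    accelerator = Accelerator()
    metric = evaluate.load("glue", "mrpc")
    model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
    model.eval()
    for batch in eval_dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        # gather_for_metrics removes the padded duplicates so the metric
        # sees each validation example exactly once across all processes
        preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()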
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ao import convert_model_to_fp8_ao, filter_first_and_last_linear_layers, has_ao_layers
from .constants import (
    MITA_PROFILING_AVAILABLE_PYTORCH_VERSION,
    MODEL_NAME,
    OPTIMIZER_NAME,
    PROFILE_PATTERN_NAME,
    RNG_STATE_NAME,
    SAFE_MODEL_NAME,
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
    SAFE_WEIGHTS_PATTERN_NAME,
    SAMPLER_NAME,
    SCALER_NAME,
    SCHEDULER_NAME,
    TORCH_DISTRIBUTED_OPERATION_TYPES,
    TORCH_LAUNCH_PARAMS,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    WEIGHTS_PATTERN_NAME,
    XPU_PROFILING_AVAILABLE_PYTORCH_VERSION,
)
from .dataclasses import (
    AORecipeKwargs,
    AutocastKwargs,
    BnbQuantizationConfig,
    ComputeEnvironment,
    CustomDtype,
    DataLoaderConfiguration,
    DDPCommunicationHookType,
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    DynamoBackend,
    FP8RecipeKwargs,
    FullyShardedDataParallelPlugin,
    GradientAccumulationPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    KwargsHandler,
    LoggerType,
    MegatronLMPlugin,
    MSAMPRecipeKwargs,
    PrecisionType,
    ProfileKwargs,
    ProjectConfiguration,
    RNGType,
    SageMakerDistributedType,
    TensorInformation,
    TERecipeKwargs,
    TorchDynamoPlugin,
    TorchTensorParallelPlugin,
    add_model_config_to_megatron_parser,
)
from .environment import (
    are_libraries_initialized,
    check_cuda_p2p_ib_support,
    check_fp8_capability,
    clear_environment,
    convert_dict_to_env_variables,
    get_cpu_distributed_information,
    get_gpu_info,
    get_int_from_env,
    parse_choice_from_env,
    parse_flag_from_env,
    patch_environment,
    purge_accelerate_environment,
    set_numa_affinity,
    str_to_bool,
)
from .imports import (
    deepspeed_required,
    get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bitsandbytes_multi_backend_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_clearml_available,
    is_comet_ml_available,
    is_cuda_available,
    is_datasets_available,
    is_deepspeed_available,
    is_dvclive_available,
    is_fp8_available,
    is_import_timer_available,
    is_ipex_available,
    is_lomo_available,
    is_megatron_lm_available,
    is_mlflow_available,
    is_mlu_available,
    is_mps_available,
    is_msamp_available,
    is_musa_available,
    is_npu_available,
    is_pandas_available,
    is_peft_available,
    is_pippy_available,
    is_pynvml_available,
    is_pytest_available,
    is_rich_available,
    is_sagemaker_available,
    is_schedulefree_available,
    is_sdaa_available,
    is_tensorboard_available,
    is_timm_available,
    is_torch_xla_available,
    is_torchao_available,
    is_torchdata_available,
    is_torchdata_stateful_dataloader_available,
    is_torchvision_available,
    is_transformer_engine_available,
    is_transformers_available,
    is_triton_available,
    is_wandb_available,
    is_weights_only_available,
    is_xpu_available,
    torchao_required,
)
from .modeling import (
    align_module_device,
    calculate_maximum_sizes,
    check_device_map,
    check_tied_parameters_in_config,
    check_tied_parameters_on_same_device,
    compute_module_sizes,
    convert_file_size_to_int,
    dtype_byte_size,
    find_tied_parameters,
    get_balanced_memory,
    get_grad_scaler,
    get_max_layer_size,
    get_max_memory,
    get_mixed_precision_context_manager,
    has_offloaded_params,
    id_tensor_storage,
    infer_auto_device_map,
    is_peft_model,
    load_checkpoint_in_model,
    load_offloaded_weights,
    load_state_dict,
    named_module_tensors,
    retie_parameters,
    set_module_tensor_to_device,
)
from .offload import (
    OffloadedWeightsLoader,
    PrefixedDataset,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
    save_offload_index,
)
from .operations import (
    CannotPadNestedTensorWarning,
    GatheredParameters,
    broadcast,
    broadcast_object_list,
    concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
    copy_tensor_to_devices,
    find_batch_size,
    find_device,
    gather,
    gather_object,
    get_data_structure,
    honor_type,
    ignorant_find_batch_size,
    initialize_tensors,
    is_namedtuple,
    is_tensor_information,
    is_torch_tensor,
    listify,
    pad_across_processes,
    pad_input_tensors,
    recursively_apply,
    reduce,
    send_to_device,
    slice_tensors,
)
from .versions import compare_versions, is_torch_version


if is_deepspeed_available():
    from .deepspeed import (
        DeepSpeedEngineWrapper,
        DeepSpeedOptimizerWrapper,
        DeepSpeedSchedulerWrapper,
        DummyOptim,
        DummyScheduler,
        HfDeepSpeedConfig,
        get_active_deepspeed_plugin,
        map_pytorch_optim_to_deepspeed,
    )

from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import (
    disable_fsdp_ram_efficient_loading,
    enable_fsdp_ram_efficient_loading,
    ensure_weights_retied,
    load_fsdp_model,
    load_fsdp_optimizer,
    merge_fsdp_weights,
    save_fsdp_model,
    save_fsdp_optimizer,
)
from .launch import (
    PrepareForLaunch,
    _filter_args,
    prepare_deepspeed_cmd_env,
    prepare_multi_gpu_env,
    prepare_sagemager_args_inputs,
    prepare_simple_launcher_cmd_env,
    prepare_tpu,
)

# For docs
from .megatron_lm import (
    AbstractTrainStep,
    BertTrainStep,
    GPTTrainStep,
    MegatronLMDummyDataLoader,
    MegatronLMDummyScheduler,
    T5TrainStep,
    avg_losses_across_data_parallel_group,
)


if is_megatron_lm_available():
    from .megatron_lm import (
        MegatronEngine,
        MegatronLMOptimizerWrapper,
        MegatronLMSchedulerWrapper,
        gather_across_data_parallel_groups,
    )
    from .megatron_lm import initialize as megatron_lm_initialize
    from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
    from .megatron_lm import prepare_model_optimizer_scheduler as megatron_lm_prepare_model_optimizer_scheduler
    from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
    from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler

from .memory import find_executable_batch_size, release_memory
from .other import (
    check_os_kernel,
    clean_state_dict_for_safetensors,
    convert_bytes,
    extract_model_from_parallel,
    get_pretty_name,
    is_port_in_use,
    load,
    merge_dicts,
    recursive_getattr,
    save,
    wait_for_everyone,
    write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import (
    apply_fp8_autowrap,
    contextual_fp8_autocast,
    convert_model,
    has_transformer_engine_layers,
)
accelerate/src/accelerate/utils/__init__.py/0
{ "file_path": "accelerate/src/accelerate/utils/__init__.py", "repo_id": "accelerate", "token_count": 3154 }
9
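The row above is the `accelerate.utils` package initializer, which re-exports the helpers listed so downstream code can import them from a single namespace. A small, hedged sketch of typical usage (not part of the dataset row; the chosen names are just examples taken from the list above):

import torch

from accelerate.utils import is_cuda_available, send_to_device, set_seed

set_seed(42)  # seeds python, numpy and torch in one call
device = "cuda" if is_cuda_available() else "cpu"

# send_to_device moves arbitrary nested containers of tensors to a device
batch = {"input_ids": torch.ones(2, 8, dtype=torch.long), "labels": torch.zeros(2, dtype=torch.long)}
batch = send_to_device(batch, device)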
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import List, Optional, Union

import numpy as np
import torch

from ..state import AcceleratorState
from .constants import CUDA_DISTRIBUTED_TYPES
from .dataclasses import DistributedType, RNGType
from .imports import (
    is_mlu_available,
    is_musa_available,
    is_npu_available,
    is_sdaa_available,
    is_torch_xla_available,
    is_xpu_available,
)


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm


def set_seed(seed: int, device_specific: bool = False, deterministic: bool = False):
    """
    Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.

    Args:
        seed (`int`):
            The seed to set.
        device_specific (`bool`, *optional*, defaults to `False`):
            Whether to differ the seed on each device slightly with `self.process_index`.
        deterministic (`bool`, *optional*, defaults to `False`):
            Whether to use deterministic algorithms where available. Can slow down training.
    """
    if device_specific:
        seed += AcceleratorState().process_index
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if is_xpu_available():
        torch.xpu.manual_seed_all(seed)
    elif is_npu_available():
        torch.npu.manual_seed_all(seed)
    elif is_mlu_available():
        torch.mlu.manual_seed_all(seed)
    elif is_sdaa_available():
        torch.sdaa.manual_seed_all(seed)
    elif is_musa_available():
        torch.musa.manual_seed_all(seed)
    else:
        torch.cuda.manual_seed_all(seed)
        # ^^ safe to call this function even if cuda is not available
    if is_torch_xla_available():
        xm.set_rng_state(seed)

    if deterministic:
        torch.use_deterministic_algorithms(True)


def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
    # Get the proper rng state
    if rng_type == RNGType.TORCH:
        rng_state = torch.get_rng_state()
    elif rng_type == RNGType.CUDA:
        rng_state = torch.cuda.get_rng_state()
    elif rng_type == RNGType.XLA:
        assert is_torch_xla_available(), "Can't synchronize XLA seeds as torch_xla is unavailable."
        rng_state = torch.tensor(xm.get_rng_state())
    elif rng_type == RNGType.NPU:
        assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs."
        rng_state = torch.npu.get_rng_state()
    elif rng_type == RNGType.MLU:
        assert is_mlu_available(), "Can't synchronize MLU seeds on an environment without MLUs."
        rng_state = torch.mlu.get_rng_state()
    elif rng_type == RNGType.SDAA:
        assert is_sdaa_available(), "Can't synchronize SDAA seeds on an environment without SDAAs."
        rng_state = torch.sdaa.get_rng_state()
    elif rng_type == RNGType.MUSA:
        assert is_musa_available(), "Can't synchronize MUSA seeds on an environment without MUSAs."
        rng_state = torch.musa.get_rng_state()
    elif rng_type == RNGType.XPU:
        assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs."
        rng_state = torch.xpu.get_rng_state()
    elif rng_type == RNGType.GENERATOR:
        assert generator is not None, "Need a generator to synchronize its seed."
        rng_state = generator.get_state()

    # Broadcast the rng state from device 0 to other devices
    state = AcceleratorState()
    if state.distributed_type == DistributedType.XLA:
        rng_state = rng_state.to(xm.xla_device())
        xm.collective_broadcast([rng_state])
        xm.mark_step()
        rng_state = rng_state.cpu()
    elif (
        state.distributed_type in CUDA_DISTRIBUTED_TYPES
        or state.distributed_type == DistributedType.MULTI_MLU
        or state.distributed_type == DistributedType.MULTI_SDAA
        or state.distributed_type == DistributedType.MULTI_MUSA
        or state.distributed_type == DistributedType.MULTI_NPU
        or state.distributed_type == DistributedType.MULTI_XPU
    ):
        rng_state = rng_state.to(state.device)
        torch.distributed.broadcast(rng_state, 0)
        rng_state = rng_state.cpu()
    elif state.distributed_type == DistributedType.MULTI_CPU:
        torch.distributed.broadcast(rng_state, 0)

    # Set the broadcast rng state
    if rng_type == RNGType.TORCH:
        torch.set_rng_state(rng_state)
    elif rng_type == RNGType.CUDA:
        torch.cuda.set_rng_state(rng_state)
    elif rng_type == RNGType.NPU:
        torch.npu.set_rng_state(rng_state)
    elif rng_type == RNGType.MLU:
        torch.mlu.set_rng_state(rng_state)
    elif rng_type == RNGType.SDAA:
        torch.sdaa.set_rng_state(rng_state)
    elif rng_type == RNGType.MUSA:
        torch.musa.set_rng_state(rng_state)
    elif rng_type == RNGType.XPU:
        torch.xpu.set_rng_state(rng_state)
    elif rng_type == RNGType.XLA:
        xm.set_rng_state(rng_state.item())
    elif rng_type == RNGType.GENERATOR:
        generator.set_state(rng_state)


def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
    for rng_type in rng_types:
        synchronize_rng_state(RNGType(rng_type), generator=generator)
accelerate/src/accelerate/utils/random.py/0
{ "file_path": "accelerate/src/accelerate/utils/random.py", "repo_id": "accelerate", "token_count": 2419 }
10
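Beyond `set_seed`, the module in the row above broadcasts RNG state from process 0 so that, for example, dataloader shuffling stays identical on every rank. A hedged sketch of how these helpers are typically called in a training script (not part of the dataset row; the seed value is arbitrary):

import torch

from accelerate import Accelerator
from accelerate.utils import set_seed, synchronize_rng_states

accelerator = Accelerator()

# Reproducible setup: same seed on every process, deterministic kernels on
set_seed(1234, deterministic=True)

# Keep the global torch RNG and a custom generator in sync across processes
generator = torch.Generator().manual_seed(1234)
synchronize_rng_states(["torch", "generator"], generator=generator)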
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from pathlib import Path from unittest.mock import patch import torch from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError import accelerate.commands.test as accelerate_test_cmd from accelerate.commands.config.config_args import BaseConfig, ClusterConfig, SageMakerConfig, load_config_from_file from accelerate.commands.estimate import estimate_command, estimate_command_parser, gather_data from accelerate.commands.launch import _validate_launch_command, launch_command, launch_command_parser from accelerate.commands.tpu import tpu_command_launcher, tpu_command_parser from accelerate.test_utils.testing import ( capture_call_output, path_in_accelerate_package, require_multi_device, require_timm, require_transformers, run_command, ) from accelerate.utils import patch_environment from accelerate.utils.launch import prepare_simple_launcher_cmd_env class AccelerateLauncherTester(unittest.TestCase): """ Test case for verifying the `accelerate launch` CLI operates correctly. If a `default_config.yaml` file is located in the cache it will temporarily move it for the duration of the tests. """ test_file_path = path_in_accelerate_package("test_utils", "scripts", "test_cli.py") notebook_launcher_path = path_in_accelerate_package("test_utils", "scripts", "test_notebook.py") config_folder = Path.home() / ".cache/huggingface/accelerate" config_file = "default_config.yaml" config_path = config_folder / config_file changed_path = config_folder / "_default_config.yaml" test_config_path = Path("tests/test_configs") parser = launch_command_parser() @classmethod def setUpClass(cls): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path) @classmethod def tearDownClass(cls): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path) def test_no_config(self): args = ["--monitor_interval", "0.1", str(self.test_file_path)] if torch.cuda.is_available() and (torch.cuda.device_count() > 1): args = ["--multi_gpu"] + args args = self.parser.parse_args(["--monitor_interval", "0.1", str(self.test_file_path)]) launch_command(args) def test_config_compatibility(self): invalid_configs = ["fp8", "invalid", "mpi", "sagemaker"] for config in sorted(self.test_config_path.glob("**/*.yaml")): if any(invalid_config in str(config) for invalid_config in invalid_configs): continue with self.subTest(config_file=config): args = self.parser.parse_args(["--config_file", str(config), str(self.test_file_path)]) launch_command(args) def test_invalid_keys(self): config_path = self.test_config_path / "invalid_keys.yaml" with self.assertRaises( ValueError, msg="The config file at 'invalid_keys.yaml' had unknown keys ('another_invalid_key', 'invalid_key')", ): args = self.parser.parse_args(["--config_file", str(config_path), str(self.test_file_path)]) launch_command(args) def test_accelerate_test(self): args = accelerate_test_cmd.test_command_parser().parse_args([]) 
accelerate_test_cmd.test_command(args) @require_multi_device def test_notebook_launcher(self): """ This test checks a variety of situations and scenarios with the `notebook_launcher` """ cmd = ["python", self.notebook_launcher_path] with patch_environment(omp_num_threads=1, accelerate_num_processes=2): run_command(cmd) def test_mpi_multicpu_config_cmd(self): """ Parses a launch command with a test file and the 0_28_0_mpi.yaml config. Tests getting the command and environment vars and verifies the mpirun command arg values. """ mpi_config_path = str(self.test_config_path / "0_28_0_mpi.yaml") test_file_arg = "--cpu" with patch("sys.argv", ["accelerate", str(self.test_file_path), test_file_arg]): parser = launch_command_parser() args = parser.parse_args() args.config_file = mpi_config_path args, _, _ = _validate_launch_command(args) # Mock out the check for mpirun version to simulate Intel MPI with patch("accelerate.utils.launch.which", return_value=True): with patch("accelerate.utils.launch.subprocess.check_output", return_value=b"Intel MPI"): cmd, _ = prepare_simple_launcher_cmd_env(args) # Verify the mpirun command args expected_mpirun_cmd = ["mpirun", "-f", "/home/user/hostfile", "-ppn", "4", "-n", "16"] self.assertGreater(len(cmd), len(expected_mpirun_cmd)) generated_mpirun_cmd = cmd[0 : len(expected_mpirun_cmd)] self.assertEqual(expected_mpirun_cmd, generated_mpirun_cmd) # Verify the python script and args in the mpirun command python_script_cmd = cmd[len(expected_mpirun_cmd) :] self.assertEqual(len(python_script_cmd), 3) self.assertEqual(python_script_cmd[1], str(self.test_file_path)) self.assertEqual(python_script_cmd[2], test_file_arg) def test_validate_launch_command(self): """Test that the validation function combines args and defaults.""" parser = launch_command_parser() args = parser.parse_args( [ "--num-processes", "2", "--deepspeed_config_file", "path/to/be/accepted", "--config-file", str(self.test_config_path / "validate_launch_cmd.yaml"), "test.py", ] ) self.assertFalse(args.debug) self.assertTrue(args.fsdp_sync_module_states) _validate_launch_command(args) self.assertTrue(args.debug) self.assertEqual(2, args.num_processes) self.assertFalse(args.fsdp_sync_module_states) self.assertEqual("path/to/be/accepted", args.deepspeed_config_file) class LaunchArgTester(unittest.TestCase): """ Test cases revolving around the CLI wrappers """ parser = launch_command_parser() def test_hyphen(self): # Try a little from each cluster args = ["--config-file", "test.yaml", "test.py"] result = self.parser.parse_args(args) assert result.config_file == "test.yaml" assert result.multi_gpu is False args = ["--multi-gpu", "--num-processes", "4", "test.py"] result = self.parser.parse_args(args) assert result.multi_gpu is True assert result.num_processes == 4 # And use a mix args = ["--multi-gpu", "--use-deepspeed", "--use-fsdp", "--num_processes", "4", "test.py"] result = self.parser.parse_args(args) assert result.multi_gpu is True assert result.use_deepspeed is True assert result.use_fsdp is True assert result.num_processes == 4 def test_underscore(self): # Try a little from each cluster args = ["--config_file", "test.yaml", "test.py"] result = self.parser.parse_args(args) assert result.config_file == "test.yaml" args = ["--multi_gpu", "--num_processes", "4", "test.py"] result = self.parser.parse_args(args) assert result.multi_gpu is True assert result.num_processes == 4 # And use a mix args = ["--multi_gpu", "--use_deepspeed", "--use_fsdp", "--num-processes", "4", "test.py"] result = 
self.parser.parse_args(args) assert result.multi_gpu is True assert result.use_deepspeed is True assert result.use_fsdp is True assert result.num_processes == 4 def test_duplicate_entities(self): help_return = self.parser.format_help() args = self.parser.parse_args(["test.py"]) for arg in args.__dict__: if "_" in arg: bad_arg = f'--{arg.replace("_", "-")}' # Need an exception for `num-processes` since it's in the docstring if bad_arg == "--num-processes": assert help_return.count(bad_arg) == 1, f"Found {bad_arg} in `accelerate launch -h`" else: assert bad_arg not in help_return, f"Found {bad_arg} in `accelerate launch -h`" class ClusterConfigTester(unittest.TestCase): """ Test case for verifying the config dataclasses work """ test_config_path = Path("tests/test_configs") def test_base_config(self): # Tests that all the dataclasses can be initialized config = BaseConfig( compute_environment="LOCAL_MACHINE", distributed_type="NO", mixed_precision="fp16", debug=False, use_cpu=False, ) assert config.compute_environment == "LOCAL_MACHINE" assert config.distributed_type == "NO" assert config.mixed_precision == "fp16" assert config.debug is False def test_cluster_config(self): # First normally config = ClusterConfig( compute_environment="LOCAL_MACHINE", distributed_type="NO", mixed_precision="fp16", num_processes=2, debug=False, use_cpu=False, ) assert config.compute_environment == "LOCAL_MACHINE" assert config.distributed_type == "NO" assert config.mixed_precision == "fp16" assert config.debug is False # Then check with other compute environments config = ClusterConfig( compute_environment="LOCAL_MACHINE", distributed_type="MULTI_GPU", mixed_precision="fp16", debug=False, num_processes=2, enable_cpu_affinity=True, use_cpu=False, ) assert config.distributed_type == "MULTI_GPU" assert config.num_processes == 2 assert config.enable_cpu_affinity is True def test_sagemaker_config(self): config = SageMakerConfig( compute_environment="AMAZON_SAGEMAKER", distributed_type="NO", mixed_precision="fp16", debug=False, use_cpu=False, ec2_instance_type="MY_TYPE", iam_role_name="MY_ROLE", ) assert config.compute_environment == "AMAZON_SAGEMAKER" assert config.ec2_instance_type == "MY_TYPE" assert config.iam_role_name == "MY_ROLE" config = load_config_from_file(str(self.test_config_path / "0_30_0_sagemaker.yaml")) class TpuConfigTester(unittest.TestCase): """ Test case for verifying the `accelerate tpu-config` CLI passes the right `gcloud` command. 
""" tpu_name = "test-tpu" tpu_zone = "us-central1-a" command = "ls" cmd = ["accelerate", "tpu-config"] base_output = "cd /usr/share" command_file = "tests/test_samples/test_command_file.sh" gcloud = "Running gcloud compute tpus tpu-vm ssh" def setUp(self): self.parser = tpu_command_parser() def test_base(self): args = self.parser.parse_args( ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] ) output = capture_call_output(tpu_command_launcher, args) assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output def test_base_backward_compatibility(self): args = self.parser.parse_args( [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] ) output = capture_call_output(tpu_command_launcher, args) assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output def test_with_config_file(self): args = self.parser.parse_args(["--config_file", "tests/test_configs/latest.yaml", "--debug"]) output = capture_call_output(tpu_command_launcher, args) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' in output ) def test_with_config_file_and_command(self): args = self.parser.parse_args( ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] ) output = capture_call_output(tpu_command_launcher, args) assert f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" in output def test_with_config_file_and_multiple_command(self): args = self.parser.parse_args( [ "--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--command", 'echo "Hello World"', "--debug", ] ) output = capture_call_output(tpu_command_launcher, args) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' in output ) def test_with_config_file_and_command_file(self): args = self.parser.parse_args( ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] ) output = capture_call_output(tpu_command_launcher, args) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' in output ) def test_with_config_file_and_command_file_backward_compatibility(self): args = self.parser.parse_args( [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command_file", self.command_file, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] ) output = capture_call_output(tpu_command_launcher, args) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' in output ) def test_accelerate_install(self): args = self.parser.parse_args( ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] ) output = capture_call_output(tpu_command_launcher, args) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' in output ) def test_accelerate_install_version(self): args = self.parser.parse_args( [ "--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", 
"--accelerate_version", "12.0.0", "--debug", ] ) output = capture_call_output(tpu_command_launcher, args) assert ( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' in output ) class ModelEstimatorTester(unittest.TestCase): """ Test case for checking the output of `accelerate estimate-memory` is correct. - Uses `estimate_command` when trying to catch raised errors - Uses `gather_data` when just verifying the calculations are correct """ parser = estimate_command_parser() def test_invalid_model_name(self): with self.assertRaises( RepositoryNotFoundError, msg="Repo for model `somebrokenname` does not exist on the Hub" ): args = self.parser.parse_args(["somebrokenname"]) estimate_command(args) @require_timm def test_invalid_model_name_timm(self): with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `timm` but"): args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "timm"]) estimate_command(args) @require_transformers def test_invalid_model_name_transformers(self): with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `transformers` but"): args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "transformers"]) estimate_command(args) def test_no_metadata(self): with self.assertRaises( ValueError, msg="Model `muellerzr/dummy` does not have any library metadata on the Hub" ): args = self.parser.parse_args(["muellerzr/dummy"]) estimate_command(args) def test_gated(self): with self.assertRaises( (GatedRepoError, EnvironmentError), msg="Repo for model `meta-llama/Llama-2-7b-hf` is gated or environment error occurred", ): args = self.parser.parse_args(["meta-llama/Llama-2-7b-hf"]) with patch_environment(hf_hub_disable_implicit_token="1"): estimate_command(args) @require_transformers def test_remote_code(self): # Also tests that custom `Auto` classes work args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model"]) with self.assertRaises(ValueError, msg="--trust_remote_code"): gather_data(args) # Verify it works with the flag args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model", "--trust_remote_code"]) gather_data(args) @require_transformers def test_explicit_dtypes(self): args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"]) output = gather_data(args) # The largest layer and total size of the model in bytes largest_layer, total_size = 90669056, 433249280 # Check that full precision -> int4 is calculating correctly assert len(output) == 2, f"Output was missing a precision, expected 2 but received {len(output)}" for i, factor in enumerate([1, 2]): precision = 32 // factor precision_str = f"float{precision}" largest_layer_estimate = largest_layer / factor total_size_estimate = total_size / factor total_training_size_estimate = total_size_estimate * 4 assert precision_str == output[i][0], f"Output is missing precision `{precision_str}`" assert ( largest_layer_estimate == output[i][1] ), f"Calculation for largest layer size in `{precision_str}` is incorrect." assert ( total_size_estimate == output[i][2] ), f"Calculation for total size in `{precision_str}` is incorrect." assert total_training_size_estimate == max( output[i][3].values() ), f"Calculation for total training size in `{precision_str}` is incorrect." 
@require_transformers def test_transformers_model(self): args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32"]) output = gather_data(args) # The largest layer and total size of the model in bytes largest_layer, total_size = 90669056, 433249280 assert ( largest_layer == output[0][1] ), f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}" assert ( total_size == output[0][2] ), f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}" @require_transformers def test_no_split_modules(self): # idefics-80b-instruct has ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"] args = self.parser.parse_args(["HuggingFaceM4/idefics-80b-instruct", "--dtypes", "float32"]) output = gather_data(args) # without factoring in `no_split` modules, the largest layer is 721420288 bytes assert output[0][1] != 721420288, "Largest layer calculation incorrect, did not factor in `no_split` modules." # the real answer is 3240165632 bytes assert output[0][1] == 3240165632 @require_timm def test_timm_model(self): args = self.parser.parse_args(["timm/resnet50.a1_in1k", "--library_name", "timm"]) output = gather_data(args) # The largest layer and total size of the model in bytes largest_layer, total_size = 9437184, 102441032 assert ( largest_layer == output[0][1] ), f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}" assert ( total_size == output[0][2] ), f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}"
accelerate/tests/test_cli.py/0
{ "file_path": "accelerate/tests/test_cli.py", "repo_id": "accelerate", "token_count": 9678 }
11
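Among other things, the CLI test suite above verifies that `accelerate launch` accepts hyphenated and underscored spellings of the same flag. A hedged sketch of that behavior using the same parser the tests import (the script name is a placeholder):

from accelerate.commands.launch import launch_command_parser

parser = launch_command_parser()

# Hyphens and underscores are interchangeable for launch flags
args = parser.parse_args(["--multi-gpu", "--num_processes", "4", "train.py"])
assert args.multi_gpu is True
assert args.num_processes == 4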
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys

from accelerate.test_utils import require_transformer_engine
from accelerate.test_utils.testing import TempDirTestCase, require_import_timer
from accelerate.utils import is_import_timer_available


if is_import_timer_available():
    from import_timer import calculate_total_time, read_import_profile
    from import_timer.core import get_paths_above_threshold, sort_nodes_by_total_time


def convert_list_to_string(data):
    end_result = ""
    arrow_right = "->"
    for path in data:
        end_result += f"{arrow_right.join(path[0])} {path[1]:.3f}s\n"
    return end_result


def run_import_time(command: str):
    output = subprocess.run([sys.executable, "-X", "importtime", "-c", command], capture_output=True, text=True)
    return output.stderr


@require_import_timer
class ImportSpeedTester(TempDirTestCase):
    """
    Test suite which checks if imports have seen slowdowns based on a particular baseline.

    If the error messages are not clear enough to get a full view of what is slowing things down
    (or to figure out how deep the initial depth should be), please view the profile with the
    `tuna` framework: `tuna import.log`.
    """

    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        output = run_import_time("import torch")
        data = read_import_profile(output)
        total_time = calculate_total_time(data)
        cls.pytorch_time = total_time

    def test_base_import(self):
        output = run_import_time("import accelerate")
        data = read_import_profile(output)
        total_time = calculate_total_time(data)
        pct_more = (total_time - self.pytorch_time) / self.pytorch_time * 100
        # Base import should never be more than 20% slower than raw torch import
        err_msg = f"Base import is more than 20% slower than raw torch import ({pct_more:.2f}%), please check the attached `tuna` profile:\n"
        sorted_data = sort_nodes_by_total_time(data)
        paths_above_threshold = get_paths_above_threshold(sorted_data, 0.05, max_depth=7)
        err_msg += f"\n{convert_list_to_string(paths_above_threshold)}"
        self.assertLess(pct_more, 20, err_msg)

    def test_cli_import(self):
        output = run_import_time("from accelerate.commands.launch import launch_command_parser")
        data = read_import_profile(output)
        total_time = calculate_total_time(data)
        pct_more = (total_time - self.pytorch_time) / self.pytorch_time * 100
        # Base import should never be more than 20% slower than raw torch import
        err_msg = f"Base import is more than 20% slower than raw torch import ({pct_more:.2f}%), please check the attached `tuna` profile:\n"
        sorted_data = sort_nodes_by_total_time(data)
        paths_above_threshold = get_paths_above_threshold(sorted_data, 0.05, max_depth=7)
        err_msg += f"\n{convert_list_to_string(paths_above_threshold)}"
        self.assertLess(pct_more, 20, err_msg)


@require_transformer_engine
class LazyImportTester(TempDirTestCase):
    """
    Test suite which checks if specific packages are lazy-loaded.

    Eager-import will trigger circular import in some case, e.g. in huggingface/accelerate#3056.
    """

    def test_te_import(self):
        output = run_import_time("import accelerate, accelerate.utils.transformer_engine")

        self.assertFalse(" transformer_engine" in output, "`transformer_engine` should not be imported on import")
accelerate/tests/test_imports.py/0
{ "file_path": "accelerate/tests/test_imports.py", "repo_id": "accelerate", "token_count": 1442 }
12
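The import-speed test above leans on CPython's `-X importtime` flag, which writes a per-module import profile to stderr. A hedged, standalone sketch of the same measurement outside the test harness (the profiled module and the number of lines printed are arbitrary examples):

import subprocess
import sys

# Profile what importing accelerate costs; the report is written to stderr
result = subprocess.run(
    [sys.executable, "-X", "importtime", "-c", "import accelerate"],
    capture_output=True,
    text=True,
)

# Each report line looks like: "import time: self [us] | cumulative | imported package"
for line in result.stderr.splitlines()[-5:]:
    print(line)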