Dataset columns: `text` (string, length 7 – 3.71M), `id` (string, length 12 – 166), `metadata` (dict), `__index_level_0__` (int64, 0 – 658).
.PHONY: quality style test docs utils

check_dirs := .

# Check that source code meets quality standards
extra_quality_checks:
	python utils/check_copies.py
	python utils/check_dummies.py
	python utils/check_repo.py
	doc-builder style src/accelerate docs/source --max_len 119

# this target runs checks on all files
quality:
	ruff check $(check_dirs)
	ruff format --check $(check_dirs)
	doc-builder style src/accelerate docs/source --max_len 119 --check_only

# Format source code automatically and check if there are any problems left that need manual fixing
style:
	ruff check $(check_dirs) --fix
	ruff format $(check_dirs)
	doc-builder style src/accelerate docs/source --max_len 119

# Run tests for the library
test_big_modeling:
	python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)

test_core:
	python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \
		--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)

test_cli:
	python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",)

test_deepspeed:
	python -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_deepspeed.log",)

test_fsdp:
	python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)

# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
# run after test_core and test_cli
test:
	$(MAKE) test_core
	$(MAKE) test_cli
	$(MAKE) test_big_modeling
	$(MAKE) test_deepspeed
	$(MAKE) test_fsdp

test_examples:
	python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)

# Broken down example tests for the CI runners
test_integrations:
	python -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",)

test_example_differences:
	python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_example_diff.log",)

test_checkpoint_epoch:
	python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_epoch.log",)

test_checkpoint_step:
	python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_step" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_step.log",)

# Same as test but used to install only the base dependencies
test_prod:
	$(MAKE) test_core

test_rest:
	python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_rest.log",)
accelerate/Makefile/0
{ "file_path": "accelerate/Makefile", "repo_id": "accelerate", "token_count": 1111 }
0
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Launching Multi-GPU Training from a Jupyter Environment

This tutorial teaches you how to fine-tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system. You will also learn how to set up the few requirements needed to ensure your environment is configured properly, your data has been prepared properly, and finally how to launch training.

<Tip>

This tutorial is also available as a Jupyter Notebook [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)

</Tip>

## Configuring the Environment

Before any training can be performed, a 🤗 Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:

```bash
accelerate config
```

However, if general defaults are fine and you are *not* running on a TPU, 🤗 Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`].

The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this.

<Tip warning={true}>

CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train, a full cleanup and restart will need to be performed.

</Tip>

```python
import os

from accelerate.utils import write_basic_config

write_basic_config()  # Write a config file
os._exit(00)  # Restart the notebook
```

## Preparing the Dataset and Model

Next you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU. If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later.

Make sure the dataset is downloaded based on the directions [here](https://github.com/huggingface/accelerate/tree/main/examples#simple-vision-example)

```python
import os, re, torch, PIL
import numpy as np

from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator
from accelerate.utils import set_seed
from timm import create_model
```

First you need to create a function to extract the class name based on a filename:

```python
import os

data_dir = "../../images"
fnames = os.listdir(data_dir)
fname = fnames[0]
print(fname)
```

```python out
beagle_32.jpg
```

In the case here, the label is `beagle`.
Using regex you can extract the label from the filename: ```python import re def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] ``` ```python extract_label(fname) ``` And you can see it properly returned the right name for our file: ```python out "beagle" ``` Next a `Dataset` class should be made to handle grabbing the image and the label: ```python class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} ``` Now to build the dataset. Outside the training function you can find and declare all the filenames and labels and use them as references inside the launched function: ```python fnames = [os.path.join("../../images", fname) for fname in fnames if fname.endswith(".jpg")] ``` Next gather all the labels: ```python all_labels = [extract_label(fname) for fname in fnames] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} ``` Next, you should make a `get_dataloaders` function that will return your built dataloaders for you. As mentioned earlier, if data is automatically sent to the GPU or a TPU device when building your `DataLoaders`, they must be built using this method. ```python def get_dataloaders(batch_size: int = 64): "Builds a set of dataloaders with a batch_size" random_perm = np.random.permutation(len(fnames)) cut = int(0.8 * len(fnames)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training a simple RandomResizedCrop will be used train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset([fnames[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id) # For evaluation a deterministic Resize will be used eval_tfm = Compose([Resize((224, 224)), ToTensor()]) eval_dataset = PetsDataset([fnames[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size * 2, num_workers=4) return train_dataloader, eval_dataloader ``` Finally, you should import the scheduler to be used later: ```python from torch.optim.lr_scheduler import CosineAnnealingLR ``` ## Writing the Training Function Now you can build the training loop. [`notebook_launcher`] works by passing in a function to call that will be ran across the distributed system. Here is a basic training loop for the animal classification problem: <Tip> The code has been split up to allow for explanations on each section. A full version that can be copy and pasted will be available at the end </Tip> ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) accelerator = Accelerator(mixed_precision=mixed_precision) ``` First you should set the seed and create an [`Accelerator`] object as early in the training loop as possible. 
<Tip warning={true}> If training on the TPU, your training loop should take in the model as a parameter and it should be instantiated outside of the training loop function. See the [TPU best practices](../concept_guides/training_tpu) to learn why </Tip> Next you should build your dataloaders and create your model: ```python train_dataloader, eval_dataloader = get_dataloaders(batch_size) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) ``` <Tip> You build the model here so that the seed also controls the new weight initialization </Tip> As you are performing transfer learning in this example, the encoder of the model starts out frozen so the head of the model can be trained only initially: ```python for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True ``` Normalizing the batches of images will make training a little faster: ```python mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] ``` To make these constants available on the active device, you should set it to the Accelerator's device: ```python mean = mean.to(accelerator.device) std = std.to(accelerator.device) ``` Next instantiate the rest of the PyTorch classes used for training: ```python optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) ``` Before passing everything to [`~Accelerator.prepare`]. <Tip> There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the prepare method. </Tip> ```python model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) ``` Now train the model: ```python for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() ``` The evaluation loop will look slightly different compared to the training loop. The number of elements passed as well as the overall total accuracy of each batch will be added to two constants: ```python model.eval() accurate = 0 num_elems = 0 ``` Next you have the rest of your standard PyTorch loop: ```python for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) ``` Before finally the last major difference. 
When performing distributed evaluation, the predictions and labels need to be passed through [`~Accelerator.gather`] so that all of the data is available on the current device and a properly calculated metric can be achieved: ```python accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() ``` Now you just need to calculate the actual metric for this problem, and you can print it on the main process using [`~Accelerator.print`]: ```python eval_metric = accurate.item() / num_elems accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` A full version of this training loop is available below: ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) # Initialize accelerator accelerator = Accelerator(mixed_precision=mixed_precision) # Build dataloaders train_dataloader, eval_dataloader = get_dataloaders(batch_size) # Instantiate the model (you build the model here so that the seed also controls new weight initaliziations) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # Freeze the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # You can normalize the batches of images to be a bit faster mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] # To make these constants available on the active device, set it to the accelerator device mean = mean.to(accelerator.device) std = std.to(accelerator.device) # Instantiate the optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) # Instantiate the learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now you train the model for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` ## Using the notebook_launcher All that's left is to use the [`notebook_launcher`]. You pass in the function, the arguments (as a tuple), and the number of processes to train on. 
(See the [documentation](../package_reference/launchers) for more information)

```python
from accelerate import notebook_launcher
```

```python
args = ("fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=2)
```

In the case of running on multiple nodes, you need to set up a Jupyter session on each node and run the launching cell at the same time.

For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so:

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8)
```

And in the second Jupyter session on the other machine:

<Tip>

Notice how the `node_rank` has changed

</Tip>

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8)
```

In the case of running on the TPU, it would look like so:

```python
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

args = (model, "fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=8)
```

As it runs, it will print the progress as well as state how many devices you ran on. This tutorial was run with two GPUs:

```python out
Launching training on 2 GPUs.
epoch 0: 88.12
epoch 1: 91.73
epoch 2: 92.58
epoch 3: 93.90
epoch 4: 94.71
```

And that's it!

Please note that [`notebook_launcher`] ignores the 🤗 Accelerate config file; to launch based on the config, use:

```bash
accelerate launch
```

## Debugging

A common issue when running the `notebook_launcher` is receiving a "CUDA has already been initialized" error. This usually stems from an import or prior code in the notebook that makes a call to the PyTorch `torch.cuda` sublibrary. To help narrow down what went wrong, you can launch the `notebook_launcher` with `ACCELERATE_DEBUG_MODE=yes` in your environment and an additional check will be made when spawning that a regular process can be created and can utilize CUDA without issue. (Your CUDA code can still be run afterwards.) A minimal sketch of enabling this from a notebook cell is shown at the end of this tutorial.

## Conclusion

This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember:

- Make sure to save any code that uses CUDA (or CUDA imports) for the function passed to [`notebook_launcher`]
- Set the `num_processes` to be the number of devices used for training (such as number of GPUs, CPUs, TPUs, etc.)
- If using the TPU, declare your model outside the training loop function
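As a small addendum to the Debugging section above, here is a minimal sketch of enabling the extra spawn-time check from inside a notebook cell. Only the `ACCELERATE_DEBUG_MODE` variable comes from the section above; the `training_loop` and its arguments are the ones defined earlier in this tutorial.

```python
import os

from accelerate import notebook_launcher

# Ask Accelerate to verify, before spawning workers, that a regular process
# can be created and can use CUDA without issue.
os.environ["ACCELERATE_DEBUG_MODE"] = "yes"

args = ("fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=2)
```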
accelerate/docs/source/basic_tutorials/notebook.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/notebook.md", "repo_id": "accelerate", "token_count": 5583 }
1
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# DeepSpeed

[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are:

1. Optimizer state partitioning (ZeRO stage 1)
2. Gradient partitioning (ZeRO stage 2)
3. Parameter partitioning (ZeRO stage 3)
4. Custom mixed precision training handling
5. A range of fast CUDA-extension-based optimizers
6. ZeRO-Offload to CPU and Disk/NVMe
7. Hierarchical partitioning of model parameters (ZeRO++)

ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe support is described in the paper [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).

DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference. DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which wouldn't be possible on a single GPU.

🤗 Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via two options:

1. Integration of the DeepSpeed features via a `deepspeed config file` specification in `accelerate config`. You just supply your custom config file or use our template. Most of this document is focused on this feature. This supports all the core features of DeepSpeed and gives the user a lot of flexibility. The user may have to change a few lines of code depending on the config.
2. Integration via `deepspeed_plugin`. This supports a subset of the DeepSpeed features and uses default options for the rest of the configurations. The user need not change any code, and this option is good for those who are fine with most of the default settings of DeepSpeed.

## What is integrated?

Training:

1. 🤗 Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++. Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer, along with a diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)

![ZeRO Data Parallelism](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png)

(Source: [link](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/))

 a. **Stage 1**: Shards optimizer states across data parallel workers/GPUs

 b. **Stage 2**: Shards optimizer states + gradients across data parallel workers/GPUs

 c.
**Stage 3**: Shards optimizer states + gradients + model parameters across data parallel workers/GPUs d. **Optimizer Offload**: Offloads the gradients + optimizer states to CPU/Disk building on top of ZERO Stage 2 e. **Param Offload**: Offloads the model parameters to CPU/Disk building on top of ZERO Stage 3 f. **Hierarchical Partitioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3. <u>Note</u>: With respect to Disk Offload, the disk should be an NVME for decent speed but it technically works on any Disk Inference: 1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see: [deepspeed-zero-inference](#deepspeed-zero-inference). ## How it works? **Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation) for more information. We will first look at easy to use integration via `accelerate config`. Followed by more flexible and feature rich `deepspeed config file` integration. ### Accelerate DeepSpeed Plugin On your machine(s) just run: ```bash accelerate config ``` and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed to which you should answer no. Then answer the following questions to generate a basic DeepSpeed config. This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with DeepSpeed Plugin: **ZeRO Stage-2 DeepSpeed Plugin Example** ```bash compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: none offload_param_device: none zero3_init_flag: true zero_stage: 2 distributed_type: DEEPSPEED fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` ```bash accelerate launch examples/nlp_example.py --mixed_precision fp16 ``` **ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example** ```bash compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` ```bash accelerate launch examples/nlp_example.py --mixed_precision fp16 ``` Currently, `Accelerate` supports following config through the CLI: ```bash `zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning `gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them. `gradient_clipping`: Enable gradient clipping with value. `offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2. 
`offload_optimizer_nvme_path`: Decides the NVMe path to offload optimizer states to. If unspecified, will default to 'none'.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`offload_param_nvme_path`: Decides the NVMe path to offload parameters to. If unspecified, will default to 'none'.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
`deepspeed_moe_layer_cls_names`: Comma-separated list of transformer Mixture-of-Experts (MoE) layer class names (case-sensitive) to wrap, e.g., `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ...
`deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources.
`deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using multi-node setup.
`deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using multi-node setup.
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.
`deepspeed_config_file`: path to the DeepSpeed config file in `json` format. See the next section for more details on this.
```

To be able to tweak more options, you will need to use a DeepSpeed config file.

### DeepSpeed Config File

On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed, to which you answer yes and provide the path to the DeepSpeed config file.
This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Config File: **ZeRO Stage-2 DeepSpeed Config File Example** ```bash compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json zero3_init_flag: true distributed_type: DEEPSPEED fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` with the contents of `zero_stage2_config.json` being: ```json { "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": "auto", "contiguous_gradients": true }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } ``` ```bash accelerate launch examples/by_feature/deepspeed_with_config_support.py \ --config_name "gpt2-large" \ --tokenizer_name "gpt2-large" \ --dataset_name "wikitext" \ --dataset_config_name "wikitext-2-raw-v1" \ --block_size 128 \ --output_dir "./clm/clm_deepspeed_stage2_accelerate" \ --learning_rate 5e-4 \ --per_device_train_batch_size 24 \ --per_device_eval_batch_size 24 \ --num_train_epochs 3 \ --with_tracking \ --report_to "wandb"\ ``` **ZeRO Stage-3 with CPU offload DeepSpeed Config File Example** ```bash compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json zero3_init_flag: true distributed_type: DEEPSPEED fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` with the contents of `zero_stage3_offload_config.json` being: ```json { "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "sub_group_size": 1e9, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, 
"gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } ``` ```bash accelerate launch examples/by_feature/deepspeed_with_config_support.py \ --config_name "gpt2-large" \ --tokenizer_name "gpt2-large" \ --dataset_name "wikitext" \ --dataset_config_name "wikitext-2-raw-v1" \ --block_size 128 \ --output_dir "./clm/clm_deepspeed_stage3_offload_accelerate" \ --learning_rate 5e-4 \ --per_device_train_batch_size 32 \ --per_device_eval_batch_size 32 \ --num_train_epochs 3 \ --with_tracking \ --report_to "wandb"\ ``` **ZeRO++ Config Example** You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/): ```json { "zero_optimization": { "stage": 3, "reduce_bucket_size": "auto", "zero_quantized_weights": true, "zero_hpz_partition_size": 8, "zero_quantized_gradients": true, "contiguous_gradients": true, "overlap_comm": true } } ``` For hierarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node. (For example, the above config file assumes 8 GPUs per node) **Important code changes when using DeepSpeed Config File** 1. DeepSpeed Optimizers and Schedulers. For more information on these, see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation. We will look at the changes needed in the code when using these. a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys are present in the DeepSpeed config file. In this situation, those will be used and the user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code. Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this: ```python # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer optimizer_cls = ( torch.optim.AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate) # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) else: lr_scheduler = DummyScheduler( optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps ) ``` b. Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file. In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin. In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file. c. 
Custom Optim + DS Scheduler: The case when only `scheduler` key is present in the DeepSpeed config file. In this situation, the user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code. d. DS Optim + Custom Scheduler: The case when only `optimizer` key is present in the DeepSpeed config file. This will result in an error because you can only use DS Scheduler when using DS Optim. 2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by `prepare` method based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user. The `auto` values are calculated as: - `reduce_bucket_size`: `hidden_size * hidden_size` - `stage3_prefetch_bucket_size`: `0.9 * hidden_size * hidden_size` - `stage3_param_persistence_threshold`: `10 * hidden_size` For the `auto` feature to work for these 3 config entries - Accelerate will use `model.config.hidden_size` or `max(model.config.hidden_sizes)` as `hidden_size`. If neither of these is available, the launching will fail and you will have to set these 3 config entries manually. Remember the first 2 config entries are the communication buffers - the larger they are the more efficient the comms will be, and the larger they are the more GPU memory they will consume, so it's a tunable performance trade-off. **Things to note when using DeepSpeed Config File** Below is a sample script using `deepspeed_config_file` in different scenarios. Code `test.py`: ```python from accelerate import Accelerator from accelerate.state import AcceleratorState def main(): accelerator = Accelerator() accelerator.print(f"{AcceleratorState()}") if __name__ == "__main__": main() ``` **Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries. 1. Content of the `accelerate` config: ```yaml command_file: null commands: null compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: 'cpu' offload_param_device: 'cpu' zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 deepspeed_config_file: 'ds_config.json' distributed_type: DEEPSPEED downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: {} gpu_ids: null machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main megatron_lm_config: {} num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_name: null tpu_zone: null use_cpu: false ``` 2. `ds_config.json`: ```json { "bf16": { "enabled": true }, "zero_optimization": { "stage": 3, "stage3_gather_16bit_weights_on_model_save": false, "offload_optimizer": { "device": "none" }, "offload_param": { "device": "none" } }, "gradient_clipping": 1.0, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": 10, "steps_per_print": 2000000 } ``` 3. Output of `accelerate launch test.py`: ```bash ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: ['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', 'zero3_save_16bit_model', 'mixed_precision']. Please specify them appropriately in the DeepSpeed config file. 
If you are using an accelerate config file, remove other config variables mentioned in the above specified list. The easiest method is to create a new config following the questionnaire via `accelerate config`. It will only ask for the necessary config variables when using `deepspeed_config_file`. ``` **Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown. 1. Run `accelerate config`: ```bash $ accelerate config ------------------------------------------------------------------------------------------------------------------------------- In which compute environment are you running? This machine ------------------------------------------------------------------------------------------------------------------------------- Which type of machine are you using? multi-GPU How many different machines will you use (use more than 1 for multi-node training)? [1]: Do you wish to optimize your script with torch dynamo?[yes/NO]: Do you want to use DeepSpeed? [yes/NO]: yes Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes Please enter the path to the json DeepSpeed config file: ds_config.json Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes How many GPU(s) should be used for distributed training? [1]:4 accelerate configuration saved at ds_config_sample.yaml ``` 2. Content of the `accelerate` config: ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: ds_config.json zero3_init_flag: true distributed_type: DEEPSPEED downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: {} machine_rank: 0 main_training_function: main megatron_lm_config: {} num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true use_cpu: false ``` 3. Output of `accelerate launch test.py`: ```bash Distributed environment: DEEPSPEED Backend: nccl Num processes: 4 Process index: 0 Local process index: 0 Device: cuda:0 Mixed precision type: bf16 ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}} ``` **Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `"auto"` in the DeepSpeed` configuration file and check that things work as expected. 1. New `ds_config.json` with `"auto"` for the `accelerate launch` DeepSpeed command arguments: ```json { "bf16": { "enabled": "auto" }, "zero_optimization": { "stage": "auto", "stage3_gather_16bit_weights_on_model_save": "auto", "offload_optimizer": { "device": "auto" }, "offload_param": { "device": "auto" } }, "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": "auto", "steps_per_print": 2000000 } ``` 2. 
Output of `accelerate launch --mixed_precision="fp16" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device="cpu" --offload_optimizer_device="nvme" --zero3_save_16bit_model="true" test.py`: ```bash Distributed environment: DEEPSPEED Backend: nccl Num processes: 4 Process index: 0 Local process index: 0 Device: cuda:0 Mixed precision type: fp16 ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}} ``` **Note**: 1. Remaining `"auto"` values are handled in `accelerator.prepare()` call as explained in point 2 of `Important code changes when using DeepSpeed Config File`. 2. Only when `gradient_accumulation_steps` is `auto`, the value passed while creating `Accelerator` object via `Accelerator(gradient_accumulation_steps=k)` will be used. When using DeepSpeed Plugin, the value from it will be used and it will overwrite the value passed while creating Accelerator object. ## Saving and loading 1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2. 2. under ZeRO Stage-3, `state_dict` contains just the placeholders since the model weights are partitioned across multiple GPUs. ZeRO Stage-3 has 2 options: a. Saving the entire 16bit model weights to directly load later on using `model.load_state_dict(torch.load(pytorch_model.bin))`. For this, either set `zero_optimization.stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed Config file or set `zero3_save_16bit_model` to True in DeepSpeed Plugin. **Note that this option requires consolidation of the weights on one GPU it can be slow and memory demanding, so only use this feature when needed.** Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this: ```python unwrapped_model = accelerator.unwrap_model(model) # New Code # # Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if # `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or # `zero3_save_16bit_model` is True in DeepSpeed Plugin. # For Zero Stages 1 and 2, models are saved as usual in the output directory. # The model name saved is `pytorch_model.bin` unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model), ) ``` b. To get 32bit weights, first save the model using `model.save_checkpoint()`. Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this: ```python success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict) status_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}" if success: logging.info(f"Success {status_msg}") else: logging.warning(f"Failure {status_msg}") ``` This will create ZeRO model and optimizer partitions along with `zero_to_fp32.py` script in checkpoint directory. You can use this script to do offline consolidation. It requires no configuration files or GPUs. Here is an example of its usage: ```bash $ cd /path/to/checkpoint_dir $ ./zero_to_fp32.py . 
pytorch_model.bin Processing zero checkpoint at global_step1 Detected checkpoint of type zero stage 3, world_size: 2 Saving fp32 state dict to pytorch_model.bin (total_numel=60506624) ``` To get 32bit model for saving/inference, you can perform: ```python from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint unwrapped_model = accelerator.unwrap_model(model) fp32_model = load_state_dict_from_zero_checkpoint(unwrapped_model, checkpoint_dir) ``` If you are only interested in the `state_dict`, you can do the following: ```python from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) ``` Note that all these functions require ~2x memory (general RAM) of the size of the final checkpoint. ## ZeRO Inference DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. With accelerate integration, you just need to prepare the model and dataloader as shown below: ```python model, eval_dataloader = accelerator.prepare(model, eval_dataloader) ``` ## Few caveats to be aware of 1. Current integration doesn’t support Pipeline Parallelism of DeepSpeed. 2. Current integration doesn’t support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM. 3. Current integration doesn’t support multiple models. ## DeepSpeed Resources The documentation for the internals related to deepspeed can be found [here](../package_reference/deepspeed). - [Project's github](https://github.com/microsoft/deepspeed) - [Usage docs](https://www.deepspeed.ai/getting-started/) - [API docs](https://deepspeed.readthedocs.io/en/latest/index.html) - [Blog posts](https://www.microsoft.com/en-us/research/search/?q=deepspeed) Papers: - [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054) - [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840) - [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857) - [ZeRO++: Extremely Efficient Collective Communication for Giant Model Training](https://arxiv.org/abs/2306.10209) Finally, please, remember that 🤗 `Accelerate` only integrates DeepSpeed, therefore if you have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues). <Tip> For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed.md)! </Tip>
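As a complement to the config-file route that most of this page covers, here is a minimal sketch of the `deepspeed_plugin` route mentioned in the introduction. The argument names mirror the CLI options listed earlier; treat the exact values as an illustration and check the `DeepSpeedPlugin` reference for the full set of parameters.

```python
from accelerate import Accelerator, DeepSpeedPlugin

# ZeRO Stage-2 with gradient clipping, configured in code rather than through
# `accelerate config`; every option not set here keeps DeepSpeed's defaults.
deepspeed_plugin = DeepSpeedPlugin(
    zero_stage=2,
    gradient_accumulation_steps=1,
    gradient_clipping=1.0,
    offload_optimizer_device="none",
    offload_param_device="none",
)
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)

# The model, optimizer, dataloaders and scheduler are then passed through
# `accelerator.prepare(...)` exactly as in the examples above, and the script
# is still started with `accelerate launch`.
```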
accelerate/docs/source/usage_guides/deepspeed.md/0
{ "file_path": "accelerate/docs/source/usage_guides/deepspeed.md", "repo_id": "accelerate", "token_count": 10168 }
2
# What are these scripts?

All scripts in this folder originate from the `nlp_example.py` file, as it is a very simplistic NLP training example using Accelerate with zero extra features.

From there, each further script adds in just **one** feature of Accelerate, showing how you can quickly modify your own scripts to implement these capabilities.

A full example with all of these parts integrated together can be found in the `complete_nlp_example.py` script and `complete_cv_example.py` script.

Adjustments to each script from the base `nlp_example.py` file can be found quickly by searching for "# New Code #"

## Example Scripts by Feature and their Arguments

### Base Example (`../nlp_example.py`)

- Shows how to use `Accelerator` in an extremely simplistic PyTorch training loop
- Arguments available:
  - `mixed_precision`, whether to use mixed precision. ("no", "fp16", or "bf16")
  - `cpu`, whether to train using only the CPU. (yes/no/1/0)

All following scripts also accept these arguments in addition to their added ones.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:

```bash
accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0
```

### Checkpointing and Resuming Training (`checkpointing.py`)

- Shows how to use `Accelerator.save_state` and `Accelerator.load_state` to save or continue training
- **It is assumed you are continuing off the same training script**
- Arguments available:
  - `checkpointing_steps`, after how many steps the various states should be saved. ("epoch", 1, 2, ...)
  - `output_dir`, where saved state folders should be saved to, default is current working directory
  - `resume_from_checkpoint`, what checkpoint folder to resume from. ("epoch_0", "step_22", ...)

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

(Note, `resume_from_checkpoint` assumes that we've run the script for one epoch with the `--checkpointing_steps epoch` flag)

```bash
accelerate launch ./checkpointing.py --checkpointing_steps epoch --output_dir "checkpointing_tutorial" --resume_from_checkpoint "checkpointing_tutorial/epoch_0"
```

### Cross Validation (`cross_validation.py`)

- Shows how to use `Accelerator.free_memory` and run cross validation efficiently with `datasets`.
- Arguments available:
  - `num_folds`, the number of folds the training dataset should be split into.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./cross_validation.py --num_folds 2
```

### Experiment Tracking (`tracking.py`)

- Shows how to use `Accelerator.init_trackers` and `Accelerator.log`
- Can be used with Weights and Biases, TensorBoard, or CometML.
- Arguments available:
  - `with_tracking`, whether to load in all available experiment trackers from the environment.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./tracking.py --with_tracking
```

### Gradient Accumulation (`gradient_accumulation.py`)

- Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup (a minimal sketch of the accumulation pattern is shown at the end of this README).
- Arguments available:
  - `gradient_accumulation_steps`, the number of steps to perform before the gradients are accumulated and the optimizer and scheduler are stepped + zero_grad

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./gradient_accumulation.py --gradient_accumulation_steps 5
```

### LocalSGD (`local_sgd.py`)

- Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup. However, unlike gradient accumulation, this method does not change the effective batch size. Local SGD can be combined with gradient accumulation.

These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `torchrun`), such as:

```bash
accelerate launch ./local_sgd.py --local_sgd_steps 4
```
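As referenced in the Gradient Accumulation section above, here is a minimal, self-contained sketch of the accumulation pattern. It uses the higher-level `accelerator.accumulate` helper with a toy dataset and model (both placeholders, not taken from the example scripts); `gradient_accumulation.py` shows how the same idea is wired into a full training run.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

# Toy data and model standing in for the real example; only the accumulation
# pattern itself is the point of this sketch.
dataset = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
dataloader = DataLoader(dataset, batch_size=8)
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

accelerator = Accelerator(gradient_accumulation_steps=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

model.train()
for inputs, labels in dataloader:
    # Inside `accumulate`, gradients are only synchronized and the optimizer is
    # only stepped every `gradient_accumulation_steps` batches; the other
    # batches skip the cross-process communication.
    with accelerator.accumulate(model):
        outputs = model(inputs)
        loss = torch.nn.functional.cross_entropy(outputs, labels)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```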
accelerate/examples/by_feature/README.md/0
{ "file_path": "accelerate/examples/by_feature/README.md", "repo_id": "accelerate", "token_count": 1218 }
3
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a ResNet50 on the Oxford-IIT Pet Dataset # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## # Function to get the label from the filename def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} def training_function(config, args): # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) image_size = config["image_size"] if not isinstance(image_size, (list, tuple)): image_size = (image_size, image_size) # Grab all the image filenames file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")] # Build the label correspondences all_labels = [extract_label(fname) for fname in file_names] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} # Set the seed before splitting the data. 
np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # Split our filenames between train and validation random_perm = np.random.permutation(len(file_names)) cut = int(0.8 * len(file_names)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training we use a simple RandomResizedCrop train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset( [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id ) # For evaluation, we use a deterministic Resize eval_tfm = Compose([Resize(image_size), ToTensor()]) eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders. train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Freezing the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # We normalize the batches of images to be a bit faster. mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device) std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device) # Instantiate optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25) # Instantiate learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for _, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["label"])) accurate_preds = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument("--data_dir", required=True, help="The data folder on disk.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(config, args) if __name__ == "__main__": main()
accelerate/examples/cv_example.py/0
{ "file_path": "accelerate/examples/cv_example.py", "repo_id": "accelerate", "token_count": 3205 }
4
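The regular expression in `extract_label` from the `cv_example.py` file above is the one piece that is easy to get wrong when adapting the script to other datasets. A tiny, self-contained sketch of its behavior (the filenames below are invented and simply follow the `<class>_<index>.jpg` pattern used by the Oxford-IIIT Pet dataset):

```python
# Quick check of the filename parsing used in cv_example.py; the example
# filenames are made up for illustration only.
import os
import re


def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


print(extract_label(os.path.join("images", "beagle_32.jpg")))            # -> beagle
print(extract_label(os.path.join("images", "great_pyrenees_105.jpg")))   # -> great_pyrenees
```

Because the `.*` group is greedy, multi-word class names such as `great_pyrenees` are kept intact and only the trailing `_<index>.jpg` suffix is stripped.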
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from manim import * class Stage7(Scene): def construct(self): # The dataset items code = Code( code="accelerator = Accelerator(dispatch_batches=True)\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...", tab_width=4, background="window", language="Python", font="Monospace", font_size=14, corner_radius=.2, insert_line_no=False, line_spacing=.75, style=Code.styles_list[1], ) code.move_to([-3.5, 2.5, 0]) self.add(code) colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"] fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) columns = [ VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0) for j in range(4) ] dataset_recs = VGroup(*columns).arrange(UP, buff=0) dataset_text = Text("Dataset", font_size=24) dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) dataset.move_to([-2,0,0]) self.add(dataset) # The dataloader itself sampler_1 = Group( Rectangle(color="blue", height=1.02, width=1.02), Text("Sampler GPU 1", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_2 = Group( Rectangle(color="blue", height=1.02, width=1.02), Text("Sampler GPU 2", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_3 = Group( Rectangle(color="blue", height=1.02, width=1.02), Text("Sampler GPU 3", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_4 = Group( Rectangle(color="blue", height=1.02, width=1.02), Text("Sampler GPU 4", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN) sampler_1.move_to([2,2,0]) sampler_2.move_to([2,.5,0]) sampler_3.move_to([2,-1.,0]) sampler_4.move_to([2,-2.5,0]) self.add(sampler_1, sampler_2, sampler_3, sampler_4) samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]] gpu_1 = Group( Rectangle(color="white", height=1.02, width=.98), Text("Output GPU 1", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0]) gpu_2 = Group( Rectangle(color="white", height=1.02, width=.98), Text("Output GPU 2", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0]) gpu_3 = Group( Rectangle(color="white", height=1.02, width=.98), Text("Output GPU 3", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0]) gpu_4 = Group( Rectangle(color="white", height=1.02, width=.98), Text("Output GPU 4", font_size=12) ).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0]) gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]] self.add(gpu_1, gpu_2, gpu_3, gpu_4) step_1 = MarkupText( f"When using a `DataLoaderDispatcher`, all\nof the samples are collected from GPU 0's dataset,\nthen divided and sent to each GPU.\nAs a result, this will be slower.", font_size=18 ) step_1.move_to([-2.5, -2, 0]) self.play( Write(step_1, run_time=3.5), ) first_animations = [] second_animations = [] colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"] 
current_color = colors[0] ud_buff = 0.01 lr_buff = 0.01 old_target = None new_datasets = [] for i,row_data in enumerate(dataset_recs): new_row = [] current_color = colors[i] for j,indiv_data in enumerate(row_data): dataset_target = Rectangle(height=0.46/4,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7) dataset_target.move_to(indiv_data) dataset_target.generate_target() aligned_edge = ORIGIN if j % 8 == 0: aligned_edge = LEFT dataset_target.target.next_to( samplers[0].get_corner(DOWN+LEFT), buff=0.0125, direction=RIGHT+UP, ) dataset_target.target.set_x(dataset_target.target.get_x()) dataset_target.target.set_y(dataset_target.target.get_y() + (.25 * i)) elif j % 4 == 0: old_target = dataset_target.target dataset_target.target.next_to( samplers[0].get_corner(DOWN+LEFT), buff=0.0125, direction=RIGHT+UP, ) dataset_target.target.set_x(dataset_target.target.get_x()) dataset_target.target.set_y(dataset_target.target.get_y()+.125 + (.25 * i)) else: dataset_target.target.next_to( old_target, direction=RIGHT, buff=0.0125, ) old_target = dataset_target.target new_row.append(dataset_target) first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color)) second_animations.append(MoveToTarget(dataset_target, run_time=1.5)) new_datasets.append(new_row) self.play( *first_animations, ) self.play(*second_animations) move_animation = [] for i,row in enumerate(new_datasets): current_color = colors[i] if i == 0: idx = -3 elif i == 1: idx = -2 elif i == 2: idx = -1 elif i == 3: idx = 0 for j,indiv_data in enumerate(row): indiv_data.generate_target() indiv_data.animate.stretch_to_fit_height(0.46/2) aligned_edge = ORIGIN if j % 8 == 0: aligned_edge = LEFT indiv_data.target.next_to( gpus[abs(idx)].get_corner(UP+LEFT), buff=.01, direction=RIGHT+DOWN, ) indiv_data.target.set_x(indiv_data.target.get_x()) indiv_data.target.set_y(indiv_data.target.get_y()-.25) elif j % 4 == 0: indiv_data.target.next_to( gpus[abs(idx)].get_corner(UP+LEFT), buff=.01, direction=RIGHT+DOWN, ) indiv_data.target.set_x(indiv_data.target.get_x()) else: indiv_data.target.next_to( old_target, direction=RIGHT, buff=0.01, ) old_target = indiv_data.target move_animation.append(MoveToTarget(indiv_data, run_time=1.5)) self.play(*move_animation) self.wait()
accelerate/manim_animations/dataloaders/stage_7.py/0
{ "file_path": "accelerate/manim_animations/dataloaders/stage_7.py", "repo_id": "accelerate", "token_count": 4184 }
5
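The `Stage7` scene above is normally rendered with the `manim` command-line tool. If you prefer staying in Python, a sketch along the following lines should work, assuming the scene is saved as `stage_7.py` next to your script and Manim Community is installed:

```python
# Hedged sketch: render Stage7 programmatically instead of via the `manim` CLI.
from manim import tempconfig

from stage_7 import Stage7  # assumes the file above is saved as stage_7.py

with tempconfig({"quality": "low_quality", "preview": False}):
    scene = Stage7()
    scene.render()  # writes the video into Manim's default media/ output directory
```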
#!/usr/bin/env python

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

from .config_args import default_config_file, load_config_from_file
from .config_utils import SubcommandHelpFormatter


description = "Update an existing config file with the latest defaults while maintaining the old configuration."


def update_config(args):
    """
    Update an existing config file with the latest defaults while maintaining the old configuration.
    """
    config_file = args.config_file
    if config_file is None and Path(default_config_file).exists():
        config_file = default_config_file
    elif not Path(config_file).exists():
        raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
    config = load_config_from_file(config_file)

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    return config_file


def update_command_parser(parser, parents):
    parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    parser.set_defaults(func=update_config_command)
    return parser


def update_config_command(args):
    config_file = update_config(args)
    print(f"Successfully updated the configuration file at {config_file}.")
accelerate/src/accelerate/commands/config/update.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/update.py", "repo_id": "accelerate", "token_count": 774 }
6
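In normal use this module is reached through `accelerate config update`, but the helper can also be driven directly. A minimal sketch, assuming a config file already exists at the given (hypothetical) path:

```python
# Minimal sketch: call the update helper directly instead of going through the CLI.
from argparse import Namespace

from accelerate.commands.config.update import update_config

args = Namespace(config_file="default_config.yaml")  # hypothetical path to an existing config
print(update_config(args))  # rewrites the file with current defaults and returns its path
```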
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import tempfile

import torch

from .state import AcceleratorState, PartialState
from .utils import (
    PrecisionType,
    PrepareForLaunch,
    are_libraries_initialized,
    check_cuda_p2p_ib_support,
    get_gpu_info,
    is_mps_available,
    patch_environment,
)


def test_launch():
    "Verify a `PartialState` can be initialized."
    _ = PartialState()


def notebook_launcher(
    function,
    args=(),
    num_processes=None,
    mixed_precision="no",
    use_port="29500",
    master_addr="127.0.0.1",
    node_rank=0,
    num_nodes=1,
):
    """
    Launches a training function, using several processes or multiple nodes if it's possible in the current environment
    (TPU with multiple cores for instance).

    <Tip warning={true}>

    To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If
    any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability.

    Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none
    of those calls have been made.

    </Tip>

    Args:
        function (`Callable`):
            The training function to execute. If it accepts arguments, the first argument should be the index of the
            process run.
        args (`Tuple`):
            Tuple of arguments to pass to the function (it will receive `*args`).
        num_processes (`int`, *optional*):
            The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to
            the number of GPUs available otherwise.
        mixed_precision (`str`, *optional*, defaults to `"no"`):
            If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
        use_port (`str`, *optional*, defaults to `"29500"`):
            The port to use to communicate between processes when launching a multi-GPU training.
        master_addr (`str`, *optional*, defaults to `"127.0.0.1"`):
            The address to use for communication between processes.
        node_rank (`int`, *optional*, defaults to 0):
            The rank of the current node.
        num_nodes (`int`, *optional*, defaults to 1):
            The number of nodes to use for training.

    Example:

    ```python
    # Assume this is defined in a Jupyter Notebook on an instance with two GPUs
    from accelerate import notebook_launcher


    def train(*args):
        # Your training function here
        ...


    notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16")
    ```
    """
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: num_processes = 8 launcher = PrepareForLaunch(function, distributed_type="TPU") print(f"Launching a training on {num_processes} TPU cores.") xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork") elif in_colab and get_gpu_info()[1] < 2: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU.") else: print("Launching training on one CPU.") function(*args) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if node_rank >= num_nodes: raise ValueError("The node_rank must be less than the number of nodes.") if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) # Check for specific libraries known to initialize CUDA that users constantly use problematic_imports = are_libraries_initialized("bitsandbytes") if len(problematic_imports) > 0: err = ( "Could not start distributed process. Libraries known to initialize CUDA upon import have been " "imported already. Please keep these imports inside your training function to try and help with this:" ) for lib_name in problematic_imports: err += f"\n\t* `{lib_name}`" raise RuntimeError(err) patched_env = dict( nproc=num_processes, node_rank=node_rank, world_size=num_nodes * num_processes, master_addr=master_addr, master_port=use_port, mixed_precision=mixed_precision, ) # Check for CUDA P2P and IB issues if not check_cuda_p2p_ib_support(): patched_env["nccl_p2p_disable"] = "1" patched_env["nccl_ib_disable"] = "1" # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment(**patched_env): # First dummy launch if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true": launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU") try: start_processes(launcher, args=(), nprocs=num_processes, start_method="fork") except ProcessRaisedException as e: err = "An issue was found when verifying a stable environment for the notebook launcher." if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( f"{err}" "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic and causing CUDA to be initialized." 
) from e else: raise RuntimeError(f"{err} The following error was raised: {e}") from e # Now the actual launch launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU") print(f"Launching training on {num_processes} GPUs.") try: start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic and causing CUDA to be initialized." ) from e else: raise RuntimeError(f"An issue was found when launching the training: {e}") from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" print("Launching training on MPS.") elif torch.cuda.is_available(): print("Launching training on one GPU.") else: print("Launching training on CPU.") function(*args) def debug_launcher(function, args=(), num_processes=2): """ Launches a training function using several processes on CPU for debugging purposes. <Tip warning={true}> This function is provided for internal testing and debugging, but it's not intended for real trainings. It will only use the CPU. </Tip> Args: function (`Callable`): The training function to execute. args (`Tuple`): Tuple of arguments to pass to the function (it will receive `*args`). num_processes (`int`, *optional*, defaults to 2): The number of processes to use for training. """ from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ): launcher = PrepareForLaunch(function, debug=True) start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
accelerate/src/accelerate/launchers.py/0
{ "file_path": "accelerate/src/accelerate/launchers.py", "repo_id": "accelerate", "token_count": 4812 }
7
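`notebook_launcher` already carries a usage example in its docstring; `debug_launcher` does not, so here is a small sketch of how it is typically exercised. It spawns CPU-only processes and is meant for debugging and tests, not real training:

```python
# Sketch: spawn two CPU processes to exercise distributed code paths locally.
from accelerate import Accelerator, debug_launcher


def check_ranks():
    accelerator = Accelerator()
    print(f"hello from process {accelerator.process_index} of {accelerator.num_processes}")


if __name__ == "__main__":
    debug_launcher(check_ranks, num_processes=2)
```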
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch.distributed from accelerate.test_utils import require_huggingface_suite from accelerate.utils import is_transformers_available if is_transformers_available(): from transformers import AutoModel, TrainingArguments GPT2_TINY = "sshleifer/tiny-gpt2" @require_huggingface_suite def init_torch_dist_then_launch_deepspeed(): torch.distributed.init_process_group(backend="nccl") deepspeed_config = { "zero_optimization": { "stage": 3, }, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", } train_args = TrainingArguments( output_dir="./", deepspeed=deepspeed_config, ) model = AutoModel.from_pretrained(GPT2_TINY) assert train_args is not None assert model is not None def main(): init_torch_dist_then_launch_deepspeed() if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py", "repo_id": "accelerate", "token_count": 517 }
8
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch from ..logging import get_logger from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME from .imports import is_torch_distributed_available from .modeling import is_peft_model from .versions import is_torch_version if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available(): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType logger = get_logger(__name__) def _get_model_state_dict(model, adapter_only=False): if adapter_only and is_peft_model(model): from peft import get_peft_model_state_dict return get_peft_model_state_dict(model, adapter_name=model.active_adapter) else: return model.state_dict() def _set_model_state_dict(model, state_dict, adapter_only=False): if adapter_only and is_peft_model(model): from peft import set_peft_model_state_dict return set_peft_model_state_dict(model, state_dict, adapter_name=model.active_adapter) else: return model.load_state_dict(state_dict) def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0, adapter_only=False): os.makedirs(output_dir, exist_ok=True) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT # so, only enable it when num_processes>1 is_multi_process = accelerator.num_processes > 1 fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process fsdp_plugin.state_dict_config.rank0_only = is_multi_process with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): state_dict = _get_model_state_dict(model, adapter_only=adapter_only) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" output_model_file = os.path.join(output_dir, weights_name) if accelerator.process_index == 0: logger.info(f"Saving model to {output_model_file}") torch.save(state_dict, output_model_file) logger.info(f"Model saved to {output_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: weights_name = ( f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) output_model_file = os.path.join(output_dir, weights_name) logger.info(f"Saving model to {output_model_file}") torch.save(state_dict, output_model_file) logger.info(f"Model saved to {output_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: ckpt_dir 
= os.path.join(output_dir, f"{FSDP_MODEL_NAME}_{model_index}") os.makedirs(ckpt_dir, exist_ok=True) logger.info(f"Saving model to {ckpt_dir}") state_dict = {"model": state_dict} dist_cp.save_state_dict( state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), ) logger.info(f"Model saved to {ckpt_dir}") def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0, adapter_only=False): accelerator.wait_for_everyone() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT # so, only enable it when num_processes>1 is_multi_process = accelerator.num_processes > 1 fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process fsdp_plugin.state_dict_config.rank0_only = is_multi_process with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(model) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin" input_model_file = os.path.join(input_dir, weights_name) logger.info(f"Loading model from {input_model_file}") state_dict = torch.load(input_model_file) logger.info(f"Model loaded from {input_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: weights_name = ( f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) input_model_file = os.path.join(input_dir, weights_name) logger.info(f"Loading model from {input_model_file}") state_dict = torch.load(input_model_file) logger.info(f"Model loaded from {input_model_file}") elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: ckpt_dir = ( os.path.join(input_dir, f"{FSDP_MODEL_NAME}_{model_index}") if f"{FSDP_MODEL_NAME}" not in input_dir else input_dir ) logger.info(f"Loading model from {ckpt_dir}") state_dict = {"model": _get_model_state_dict(model, adapter_only=adapter_only)} dist_cp.load_state_dict( state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), ) state_dict = state_dict["model"] logger.info(f"Model loaded from {ckpt_dir}") load_result = _set_model_state_dict(model, state_dict, adapter_only=adapter_only) return load_result def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0): os.makedirs(output_dir, exist_ok=True) with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): optim_state = FSDP.optim_state_dict(model, optimizer) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: optim_state_name = ( f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) output_optimizer_file = os.path.join(output_dir, optim_state_name) logger.info(f"Saving Optimizer state to {output_optimizer_file}") torch.save(optim_state, output_optimizer_file) logger.info(f"Optimizer state saved in {output_optimizer_file}") else: ckpt_dir = os.path.join(output_dir, 
f"{OPTIMIZER_NAME}_{optimizer_index}") os.makedirs(ckpt_dir, exist_ok=True) logger.info(f"Saving Optimizer state to {ckpt_dir}") dist_cp.save_state_dict( state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), ) logger.info(f"Optimizer state saved in {ckpt_dir}") def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0, adapter_only=False): accelerator.wait_for_everyone() with FSDP.state_dict_type( model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: optim_state = None if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: optimizer_name = ( f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) input_optimizer_file = os.path.join(input_dir, optimizer_name) logger.info(f"Loading Optimizer state from {input_optimizer_file}") optim_state = torch.load(input_optimizer_file) logger.info(f"Optimizer state loaded from {input_optimizer_file}") else: ckpt_dir = ( os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}") if f"{OPTIMIZER_NAME}" not in input_dir else input_dir ) logger.info(f"Loading Optimizer from {ckpt_dir}") optim_state = load_sharded_optimizer_state_dict( model_state_dict=_get_model_state_dict(model, adapter_only=adapter_only), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(ckpt_dir), ) optim_state = optim_state["optimizer"] logger.info(f"Optimizer loaded from {ckpt_dir}") flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state) optimizer.load_state_dict(flattened_osd)
accelerate/src/accelerate/utils/fsdp_utils.py/0
{ "file_path": "accelerate/src/accelerate/utils/fsdp_utils.py", "repo_id": "accelerate", "token_count": 4830 }
9
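These FSDP helpers are normally invoked for you by `Accelerator.save_state` / `load_state` when FSDP is active; the sketch below shows the direct call pattern. It is only meaningful when the script is launched with an FSDP-enabled accelerate config (e.g. via `accelerate launch`), and the checkpoint directory name is arbitrary:

```python
# Hedged sketch: direct use of the FSDP checkpoint helpers. Outside of an
# FSDP launch, `accelerator.state.fsdp_plugin` will not be populated.
import torch

from accelerate import Accelerator
from accelerate.utils.fsdp_utils import save_fsdp_model, save_fsdp_optimizer

accelerator = Accelerator()
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

fsdp_plugin = accelerator.state.fsdp_plugin
save_fsdp_model(fsdp_plugin, accelerator, model, "checkpoint", model_index=0)
save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "checkpoint", optimizer_index=0)
```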
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
accelerate/tests/deepspeed/ds_config_zero3.json/0
{ "file_path": "accelerate/tests/deepspeed/ds_config_zero3.json", "repo_id": "accelerate", "token_count": 825 }
10
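The `"auto"` values in this ZeRO-3 config are placeholders that get resolved from the training script's own arguments. One hedged way to hook the file up through 🤗 Accelerate (the path is assumed relative to the accelerate repo root, and DeepSpeed must be installed):

```python
# Hedged sketch: point a DeepSpeedPlugin at the ZeRO-3 JSON above. The "auto"
# entries are filled in from the prepared model/dataloaders at prepare() time.
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

deepspeed_plugin = DeepSpeedPlugin(hf_ds_config="tests/deepspeed/ds_config_zero3.json")
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
# model, optimizer and dataloaders are then passed through accelerator.prepare(...) as usual
```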
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from accelerate import debug_launcher from accelerate.test_utils import ( DEFAULT_LAUNCH_COMMAND, device_count, execute_subprocess_async, path_in_accelerate_package, require_cpu, require_multi_device, require_non_cpu, test_sync, ) from accelerate.utils import patch_environment class SyncScheduler(unittest.TestCase): test_file_path = path_in_accelerate_package("test_utils", "scripts", "test_sync.py") @require_cpu def test_gradient_sync_cpu_noop(self): debug_launcher(test_sync.main, num_processes=1) @require_cpu def test_gradient_sync_cpu_multi(self): debug_launcher(test_sync.main) @require_non_cpu def test_gradient_sync_gpu(self): test_sync.main() @require_multi_device def test_gradient_sync_gpu_multi(self): print(f"Found {device_count} devices.") cmd = DEFAULT_LAUNCH_COMMAND + [self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd)
accelerate/tests/test_grad_sync.py/0
{ "file_path": "accelerate/tests/test_grad_sync.py", "repo_id": "accelerate", "token_count": 579 }
11
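To run just this test module outside of the repository's make targets, one option is to invoke pytest from Python (the path is assumed relative to the accelerate repo root):

```python
# Equivalent to `python -m pytest -s -v tests/test_grad_sync.py`
import pytest

raise SystemExit(pytest.main(["-s", "-v", "tests/test_grad_sync.py"]))
```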
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import logging import os import random import shutil import tempfile import unittest import uuid from contextlib import contextmanager import pytest import torch from parameterized import parameterized_class from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import ( DEFAULT_LAUNCH_COMMAND, execute_subprocess_async, require_non_cpu, require_non_torch_xla, ) from accelerate.utils import DistributedType, ProjectConfiguration, set_seed logger = logging.getLogger(__name__) def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2): "Generates a tuple of dummy DataLoaders to test with" def get_dataset(n_batches): x = torch.randn(batch_size * n_batches, 1) return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1)) train_dataset = get_dataset(n_train_batches) valid_dataset = get_dataset(n_valid_batches) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4) return (train_dataloader, valid_dataloader) def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None): "Trains for `num_epochs`" rands = [] for epoch in range(num_epochs): # Train quickly model.train() for batch in dataloader: x, y = batch outputs = model(x) loss = torch.nn.functional.mse_loss(outputs, y) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() rands.append(random.random()) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class DummyModel(nn.Module): "Simple model to do y=mx+b" def __init__(self): super().__init__() self.a = nn.Parameter(torch.randn(1)) self.b = nn.Parameter(torch.randn(1)) def forward(self, x): return x * self.a + self.b def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = "use_safetensors" if param["use_safetensors"] is True else "use_pytorch" return f"{func.__name__}_{param_based_name}" @parameterized_class(("use_safetensors",), [[True], [False]], class_name_func=parameterized_custom_name_func) class CheckpointTest(unittest.TestCase): def check_adam_state(self, state1, state2, distributed_type): # For DistributedType.XLA, the `accelerator.save_state` function calls `xm._maybe_convert_to_cpu` before saving. # As a result, all tuple values are converted to lists. Therefore, we need to convert them back here. # Remove this code once Torch XLA fixes this issue. 
if distributed_type == DistributedType.XLA: state1["param_groups"][0]["betas"] = tuple(state1["param_groups"][0]["betas"]) state2["param_groups"][0]["betas"] = tuple(state2["param_groups"][0]["betas"]) assert state1 == state2 def test_with_save_limit(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial accelerator.save_state(safe_serialization=self.use_safetensors) # Save second state accelerator.save_state(safe_serialization=self.use_safetensors) assert len(os.listdir(accelerator.project_dir)) == 1 def test_can_resume_training_with_folder(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() # Train baseline accelerator = Accelerator() model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial initial = os.path.join(tmpdir, "initial") accelerator.save_state(initial, safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() accelerator = Accelerator() model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state(initial) (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() self.assertEqual(a, a2) self.assertEqual(b, b2) assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything checkpoint = os.path.join(tmpdir, "checkpoint") accelerator.save_state(checkpoint, safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(checkpoint) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_can_resume_training(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial 
accelerator.save_state(safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True) accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything accelerator.save_state(safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1")) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_can_resume_training_checkpoints_relative_path(self): # See #1983 # This test is like test_can_resume_training but uses a relative path for the checkpoint and automatically # infers the checkpoint path when loading. 
@contextmanager def temporary_relative_directory(): # This is equivalent to tempfile.TemporaryDirectory() except that it returns a relative path rand_dir = f"test_path_{uuid.uuid4()}" os.mkdir(rand_dir) try: yield rand_dir finally: shutil.rmtree(rand_dir) with temporary_relative_directory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial accelerator.save_state(safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True) accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state() # <= infer the directory automatically (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) assert opt_state == opt_state2 test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything accelerator.save_state(safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1")) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_invalid_registration(self): t = torch.tensor([1, 2, 3]) t1 = torch.tensor([2, 3, 4]) net = DummyModel() opt = torch.optim.Adam(net.parameters()) accelerator = Accelerator() with self.assertRaises(ValueError) as ve: accelerator.register_for_checkpointing(t, t1, net, opt) message = str(ve.exception) assert "Item at index 0" in message assert "Item at index 1" in message assert "Item at index 2" not in message assert "Item at index 3" not in message def test_with_scheduler(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) # Save initial 
accelerator.save_state(safe_serialization=self.use_safetensors) scheduler_state = scheduler.state_dict() train(3, model, train_dataloader, optimizer, accelerator, scheduler) assert scheduler_state != scheduler.state_dict() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) assert scheduler_state == scheduler.state_dict() def test_automatic_loading(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) # Save initial accelerator.save_state(safe_serialization=self.use_safetensors) train(2, model, train_dataloader, optimizer, accelerator, scheduler) (a2, b2) = model.a.item(), model.b.item() # Save a first time accelerator.save_state(safe_serialization=self.use_safetensors) train(1, model, train_dataloader, optimizer, accelerator, scheduler) (a3, b3) = model.a.item(), model.b.item() # Load back in the last saved checkpoint, should point to a2, b2 accelerator.load_state() assert a3 != model.a.item() assert b3 != model.b.item() assert a2 == model.a.item() assert b2 == model.b.item() def test_checkpoint_deletion(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model = accelerator.prepare(model) # Save 3 states: for _ in range(11): accelerator.save_state(safe_serialization=self.use_safetensors) assert not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) assert os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")) assert os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")) @require_non_cpu @require_non_torch_xla def test_map_location(self): cmd = DEFAULT_LAUNCH_COMMAND + [inspect.getfile(self.__class__)] execute_subprocess_async( cmd, env={ **os.environ, "USE_SAFETENSORS": str(self.use_safetensors), "OMP_NUM_THREADS": "1", }, ) if __name__ == "__main__": use_safetensors = os.environ.get("USE_SAFETENSORS", "False") == "True" savedir = "/tmp/accelerate/state_checkpointing" model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) model, optimizer = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for 
group in optimizer.param_groups: param_device = group["params"][0].device break assert param_device.type == accelerator.device.type model = model.cpu() accelerator.wait_for_everyone() accelerator.save_state(safe_serialization=use_safetensors) accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: param_device = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: param_device = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
accelerate/tests/test_state_checkpointing.py/0
{ "file_path": "accelerate/tests/test_state_checkpointing.py", "repo_id": "accelerate", "token_count": 8972 }
12
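The pattern these checkpointing tests exercise in many variations boils down to a few lines. A condensed sketch, with an arbitrary project directory name:

```python
# Condensed sketch of the checkpointing pattern under test: with automatic
# checkpoint naming, states go to <project_dir>/checkpoints/checkpoint_<n>,
# and load_state() with no argument restores the most recent one.
import torch

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="my_project", project_config=project_config)

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state()  # -> my_project/checkpoints/checkpoint_0
# ... train for a while ...
accelerator.save_state()  # -> my_project/checkpoints/checkpoint_1
accelerator.load_state()  # restores checkpoint_1, the latest checkpoint
```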
# Model arguments model_name_or_path: bigcode/starcoder2-15b model_revision: main torch_dtype: bfloat16 use_flash_attention_2: true # Data training arguments chat_template: "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" dataset_mixer: HuggingFaceH4/airoboros-3.2: 1.0 HuggingFaceH4/Code-Feedback: 1.0 HuggingFaceH4/orca-math-word-problems-200k: 1.0 HuggingFaceH4/SystemChat: 1.0 HuggingFaceH4/capybara: 1.0 dataset_splits: - train_sft - test_sft preprocessing_num_workers: 24 # SFT trainer config bf16: true do_eval: true evaluation_strategy: epoch gradient_accumulation_steps: 2 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: false hub_model_id: starchat2-15b-v0.1 hub_strategy: every_save learning_rate: 2.0e-05 log_level: info logging_steps: 5 logging_strategy: steps lr_scheduler_type: cosine max_seq_length: 2048 max_steps: -1 num_train_epochs: 3 output_dir: data/starchat2-15b-v0.1 overwrite_output_dir: true per_device_eval_batch_size: 8 per_device_train_batch_size: 8 push_to_hub: true remove_unused_columns: true report_to: - tensorboard - wandb save_strategy: "no" seed: 42 warmup_ratio: 0.1
alignment-handbook/recipes/starchat2-15b/sft/config_v0.1.yaml/0
{ "file_path": "alignment-handbook/recipes/starchat2-15b/sft/config_v0.1.yaml", "repo_id": "alignment-handbook", "token_count": 565 }
13
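The recipe above is plain YAML, so it can be inspected or tweaked from Python before being handed to the alignment-handbook training scripts. A minimal sketch, assuming you are at the root of the alignment-handbook repository and have PyYAML installed:

```python
# Minimal sketch: read the recipe and look at the dataset mixing proportions.
import yaml

with open("recipes/starchat2-15b/sft/config_v0.1.yaml") as f:
    recipe = yaml.safe_load(f)

print(recipe["model_name_or_path"])  # bigcode/starcoder2-15b
print(recipe["dataset_mixer"])       # dataset name -> sampling proportion
```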
[isort] default_section = FIRSTPARTY ensure_newline_before_comments = True force_grid_wrap = 0 include_trailing_comma = True known_first_party = alignment known_third_party = transformers datasets fugashi git h5py matplotlib nltk numpy packaging pandas psutil pytest rouge_score sacrebleu seqeval sklearn streamlit torch tqdm line_length = 119 lines_after_imports = 2 multi_line_output = 3 use_parentheses = True [flake8] ignore = E203, E501, E741, W503, W605 max-line-length = 119 per-file-ignores = # imported but unused __init__.py: F401 [tool:pytest] doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
alignment-handbook/setup.cfg/0
{ "file_path": "alignment-handbook/setup.cfg", "repo_id": "alignment-handbook", "token_count": 297 }
14
<jupyter_start><jupyter_code>#@title 🤗 AutoTrain DreamBooth #@markdown In order to use this colab #@markdown - upload images to a folder named `images/` #@markdown - choose a project name if you wish #@markdown - change model if you wish, you can also select sd2/2.1 or sd1.5 #@markdown - update prompt and remember it. choose keywords that don't usually appear in dictionaries #@markdown - add huggingface information (token) if you wish to push trained model to huggingface hub #@markdown - update hyperparameters if you wish #@markdown - click `Runtime > Run all` or run each cell individually #@markdown - report issues / feature requests here: https://github.com/huggingface/autotrain-advanced/issues import os !pip install -U autotrain-advanced > install_logs.txt #@markdown --- #@markdown #### Project Config project_name = 'my-dreambooth-project' # @param {type:"string"} model_name = 'stabilityai/stable-diffusion-xl-base-1.0' # @param ["stabilityai/stable-diffusion-xl-base-1.0", "runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-2-1", "stabilityai/stable-diffusion-2-1-base"] prompt = 'photo of a sks dog' # @param {type: "string"} #@markdown --- #@markdown #### Push to Hub? #@markdown Use these only if you want to push your trained model to a private repo in your Hugging Face Account #@markdown If you dont use these, the model will be saved in Google Colab and you are required to download it manually. #@markdown Please enter your Hugging Face write token. The trained model will be saved to your Hugging Face account. #@markdown You can find your token here: https://huggingface.co/settings/tokens push_to_hub = False # @param ["False", "True"] {type:"raw"} hf_token = "hf_XXX" #@param {type:"string"} hf_username = "abc" #@param {type:"string"} #@markdown --- #@markdown #### Hyperparameters learning_rate = 1e-4 # @param {type:"number"} num_steps = 500 #@param {type:"number"} batch_size = 1 # @param {type:"slider", min:1, max:32, step:1} gradient_accumulation = 4 # @param {type:"slider", min:1, max:32, step:1} resolution = 1024 # @param {type:"slider", min:128, max:1024, step:128} use_8bit_adam = False # @param ["False", "True"] {type:"raw"} use_xformers = False # @param ["False", "True"] {type:"raw"} mixed_precision = "fp16" # @param ["fp16", "bf16", "none"] {type:"raw"} train_text_encoder = False # @param ["False", "True"] {type:"raw"} disable_gradient_checkpointing = False # @param ["False", "True"] {type:"raw"} os.environ["PROJECT_NAME"] = project_name os.environ["MODEL_NAME"] = model_name os.environ["PROMPT"] = prompt os.environ["PUSH_TO_HUB"] = str(push_to_hub) os.environ["HF_TOKEN"] = hf_token os.environ["LEARNING_RATE"] = str(learning_rate) os.environ["NUM_STEPS"] = str(num_steps) os.environ["BATCH_SIZE"] = str(batch_size) os.environ["GRADIENT_ACCUMULATION"] = str(gradient_accumulation) os.environ["RESOLUTION"] = str(resolution) os.environ["USE_8BIT_ADAM"] = str(use_8bit_adam) os.environ["USE_XFORMERS"] = str(use_xformers) os.environ["MIXED_PRECISION"] = str(mixed_precision) os.environ["TRAIN_TEXT_ENCODER"] = str(train_text_encoder) os.environ["DISABLE_GRADIENT_CHECKPOINTING"] = str(disable_gradient_checkpointing) os.environ["HF_USERNAME"] = hf_username !autotrain dreambooth \ --model ${MODEL_NAME} \ --project-name ${PROJECT_NAME} \ --image-path images/ \ --prompt "${PROMPT}" \ --resolution ${RESOLUTION} \ --batch-size ${BATCH_SIZE} \ --num-steps ${NUM_STEPS} \ --gradient-accumulation ${GRADIENT_ACCUMULATION} \ --lr ${LEARNING_RATE} \ --mixed-precision ${MIXED_PRECISION} \ 
--username ${HF_USERNAME} \ $( [[ "$USE_XFORMERS" == "True" ]] && echo "--xformers" ) \ $( [[ "$TRAIN_TEXT_ENCODER" == "True" ]] && echo "--train-text-encoder" ) \ $( [[ "$USE_8BIT_ADAM" == "True" ]] && echo "--use-8bit-adam" ) \ $( [[ "$DISABLE_GRADIENT_CHECKPOINTING" == "True" ]] && echo "--disable_gradient-checkpointing" ) \ $( [[ "$PUSH_TO_HUB" == "True" ]] && echo "--push-to-hub --token ${HF_TOKEN}" ) # Inference # this is the inference code that you can use after you have trained your model # Unhide code below and change prj_path to your repo or local path (e.g. my_dreambooth_project) # # # # from diffusers import DiffusionPipeline, StableDiffusionXLImg2ImgPipeline # import torch # prj_path = "username/repo_name" # model = "stabilityai/stable-diffusion-xl-base-1.0" # pipe = DiffusionPipeline.from_pretrained( # model, # torch_dtype=torch.float16, # ) # pipe.to("cuda") # pipe.load_lora_weights(prj_path, weight_name="pytorch_lora_weights.safetensors") # refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained( # "stabilityai/stable-diffusion-xl-refiner-1.0", # torch_dtype=torch.float16, # ) # refiner.to("cuda") # prompt = "photo of a sks dog in a bucket" # seed = 42 # generator = torch.Generator("cuda").manual_seed(seed) # image = pipe(prompt=prompt, generator=generator).images[0] # image = refiner(prompt=prompt, generator=generator, image=image).images[0] # image.save(f"generated_image.png")<jupyter_output><empty_output>
autotrain-advanced/colabs/AutoTrain_Dreambooth.ipynb/0
{ "file_path": "autotrain-advanced/colabs/AutoTrain_Dreambooth.ipynb", "repo_id": "autotrain-advanced", "token_count": 1845 }
15
task: token_classification base_model: google-bert/bert-base-uncased project_name: autotrain-bert-custom-finetuned log: tensorboard backend: local data: path: data/ # this must be the path to the directory containing the train and valid files train_split: train # this must be either train.json valid_split: valid # this must be either valid.json, can also be set to null column_mapping: text_column: text # this must be the name of the column containing the text target_column: label # this must be the name of the column containing the target params: max_seq_length: 512 epochs: 3 batch_size: 4 lr: 2e-5 optimizer: adamw_torch scheduler: linear gradient_accumulation: 1 mixed_precision: fp16 hub: username: ${HF_USERNAME} token: ${HF_TOKEN} push_to_hub: true
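Before launching a run with this file (typically `autotrain --config config.yml`), it can be worth checking that the column mapping matches the data on disk. Below is a small, hedged pre-flight sketch; it assumes the training file is JSON Lines with one record per line, which may differ from your actual data layout.

```python
# Hypothetical pre-flight check for the config above.
import json

import yaml  # PyYAML is assumed to be installed

with open("config.yml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

mapping = cfg["data"]["column_mapping"]
train_file = f'{cfg["data"]["path"]}/{cfg["data"]["train_split"]}.json'

with open(train_file, encoding="utf-8") as f:
    first_record = json.loads(f.readline())  # assumes one JSON object per line

for column in (mapping["text_column"], mapping["target_column"]):
    assert column in first_record, f"column '{column}' is missing from {train_file}"
```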
autotrain-advanced/configs/token_classification/local_dataset.yml/0
{ "file_path": "autotrain-advanced/configs/token_classification/local_dataset.yml", "repo_id": "autotrain-advanced", "token_count": 262 }
16
# Lint as: python3 """ HuggingFace / AutoTrain Advanced """ import os from setuptools import find_packages, setup DOCLINES = __doc__.split("\n") this_directory = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f: LONG_DESCRIPTION = f.read() # get INSTALL_REQUIRES from requirements.txt INSTALL_REQUIRES = [] requirements_path = os.path.join(this_directory, "requirements.txt") with open(requirements_path, encoding="utf-8") as f: for line in f: # Exclude 'bitsandbytes' if installing on macOS if "bitsandbytes" in line: line = line.strip() + " ; sys_platform == 'linux'" INSTALL_REQUIRES.append(line.strip()) else: INSTALL_REQUIRES.append(line.strip()) QUALITY_REQUIRE = [ "black", "isort", "flake8==3.7.9", ] TESTS_REQUIRE = ["pytest"] EXTRAS_REQUIRE = { "dev": INSTALL_REQUIRES + QUALITY_REQUIRE + TESTS_REQUIRE, "quality": INSTALL_REQUIRES + QUALITY_REQUIRE, "docs": INSTALL_REQUIRES + [ "recommonmark", "sphinx==3.1.2", "sphinx-markdown-tables", "sphinx-rtd-theme==0.4.3", "sphinx-copybutton", ], } setup( name="autotrain-advanced", description=DOCLINES[0], long_description=LONG_DESCRIPTION, long_description_content_type="text/markdown", author="HuggingFace Inc.", author_email="autotrain@huggingface.co", url="https://github.com/huggingface/autotrain-advanced", download_url="https://github.com/huggingface/autotrain-advanced/tags", license="Apache 2.0", package_dir={"": "src"}, packages=find_packages("src"), extras_require=EXTRAS_REQUIRE, install_requires=INSTALL_REQUIRES, entry_points={"console_scripts": ["autotrain=autotrain.cli.autotrain:main"]}, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], keywords="automl autonlp autotrain huggingface", data_files=[ ("static", ["src/autotrain/app/static/logo.png"]), ( "templates", [ "src/autotrain/app/templates/index.html", "src/autotrain/app/templates/error.html", "src/autotrain/app/templates/duplicate.html", "src/autotrain/app/templates/login.html", ], ), ], include_package_data=True, )
autotrain-advanced/setup.py/0
{ "file_path": "autotrain-advanced/setup.py", "repo_id": "autotrain-advanced", "token_count": 1267 }
17
import os import signal import sys import psutil import requests from autotrain import config, logger def get_running_jobs(db): running_jobs = db.get_running_jobs() if running_jobs: for _pid in running_jobs: proc_status = get_process_status(_pid) proc_status = proc_status.strip().lower() if proc_status in ("completed", "error", "zombie"): logger.info(f"Killing PID: {_pid}") try: kill_process_by_pid(_pid) except Exception as e: logger.info(f"Error while killing process: {e}") logger.info(f"Process {_pid} is already completed. Skipping...") db.delete_job(_pid) running_jobs = db.get_running_jobs() return running_jobs def get_process_status(pid): try: process = psutil.Process(pid) proc_status = process.status() return proc_status except psutil.NoSuchProcess: logger.info(f"No process found with PID: {pid}") return "Completed" def kill_process_by_pid(pid): """Kill process by PID.""" def sigint_handler(signum, frame): """Handle SIGINT signal gracefully.""" logger.info("SIGINT received. Exiting gracefully...") sys.exit(0) # Exit with code 0 signal.signal(signal.SIGINT, sigint_handler) os.kill(pid, signal.SIGTERM) def token_verification(token): if token.startswith("hf_oauth"): _api_url = config.HF_API + "/oauth/userinfo" else: _api_url = config.HF_API + "/api/whoami-v2" headers = {} cookies = {} if token.startswith("hf_"): headers["Authorization"] = f"Bearer {token}" else: cookies = {"token": token} try: response = requests.get( _api_url, headers=headers, cookies=cookies, timeout=3, ) except (requests.Timeout, ConnectionError) as err: logger.error(f"Failed to request whoami-v2 - {repr(err)}") raise Exception("Hugging Face Hub is unreachable, please try again later.") if response.status_code != 200: logger.error(f"Failed to request whoami-v2 - {response.status_code}") raise Exception("Invalid token. Please login with a write token.") resp = response.json() user_info = {} if token.startswith("hf_oauth"): user_info["id"] = resp["sub"] user_info["name"] = resp["preferred_username"] user_info["orgs"] = [resp["orgs"][k]["preferred_username"] for k in range(len(resp["orgs"]))] else: user_info["id"] = resp["id"] user_info["name"] = resp["name"] user_info["orgs"] = [resp["orgs"][k]["name"] for k in range(len(resp["orgs"]))] return user_info def get_user_and_orgs(user_token): if user_token is None: raise Exception("Please login with a write token.") if user_token is None or len(user_token) == 0: raise Exception("Invalid token. Please login with a write token.") user_info = token_verification(token=user_token) username = user_info["name"] orgs = user_info["orgs"] who_is_training = [username] + orgs return who_is_training
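For illustration, here is roughly how the helpers above are meant to be used; the token and PID values are placeholders, not real credentials or processes.

```python
# Illustrative usage of the helpers defined above (values are placeholders).
from autotrain.app.utils import get_process_status, get_user_and_orgs

try:
    who_is_training = get_user_and_orgs(user_token="hf_XXX")  # placeholder token
    print(who_is_training)  # e.g. ["my-username", "my-org"]
except Exception as err:
    print(f"Token rejected: {err}")

print(get_process_status(pid=12345))  # hypothetical PID of a training job
```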
autotrain-advanced/src/autotrain/app/utils.py/0
{ "file_path": "autotrain-advanced/src/autotrain/app/utils.py", "repo_id": "autotrain-advanced", "token_count": 1392 }
18
from argparse import ArgumentParser from autotrain import logger from autotrain.cli.utils import common_args, seq2seq_munge_data from autotrain.project import AutoTrainProject from autotrain.trainers.seq2seq.params import Seq2SeqParams from . import BaseAutoTrainCommand def run_seq2seq_command_factory(args): return RunAutoTrainSeq2SeqCommand(args) class RunAutoTrainSeq2SeqCommand(BaseAutoTrainCommand): @staticmethod def register_subcommand(parser: ArgumentParser): arg_list = [ { "arg": "--text-column", "help": "Specify the column name in the dataset that contains the text data. Useful for distinguishing between multiple text fields. Default is 'text'.", "required": False, "type": str, "default": "text", }, { "arg": "--target-column", "help": "Specify the column name that holds the target data for training. Helps in distinguishing different potential outputs. Default is 'target'.", "required": False, "type": str, "default": "target", }, { "arg": "--max-seq-length", "help": "Set the maximum sequence length (number of tokens) that the model should handle in a single input. Longer sequences are truncated. Affects both memory usage and computational requirements. Default is 128 tokens.", "required": False, "type": int, "default": 128, }, { "arg": "--max-target-length", "help": "Define the maximum number of tokens for the target sequence in each input. Useful for models that generate outputs, ensuring uniformity in sequence length. Default is set to 128 tokens.", "required": False, "type": int, "default": 128, }, { "arg": "--warmup-ratio", "help": "Define the proportion of training to be dedicated to a linear warmup where learning rate gradually increases. This can help in stabilizing the training process early on. Default ratio is 0.1.", "required": False, "type": float, "default": 0.1, }, { "arg": "--optimizer", "help": "Choose the optimizer algorithm for training the model. Different optimizers can affect the training speed and model performance. 'adamw_torch' is used by default.", "required": False, "type": str, "default": "adamw_torch", }, { "arg": "--scheduler", "help": "Select the learning rate scheduler to adjust the learning rate based on the number of epochs. 'linear' decreases the learning rate linearly from the initial lr set. Default is 'linear'. Try 'cosine' for a cosine annealing schedule.", "required": False, "type": str, "default": "linear", }, { "arg": "--weight-decay", "help": "Set the weight decay rate to apply for regularization. Helps in preventing the model from overfitting by penalizing large weights. Default is 0.0, meaning no weight decay is applied.", "required": False, "type": float, "default": 0.0, }, { "arg": "--max-grad-norm", "help": "Specify the maximum norm of the gradients for gradient clipping. Gradient clipping is used to prevent the exploding gradient problem in deep neural networks. Default is 1.0.", "required": False, "type": float, "default": 1.0, }, { "arg": "--logging-steps", "help": "Determine how often to log training progress. Set this to the number of steps between each log output. -1 determines logging steps automatically. Default is -1.", "required": False, "type": int, "default": -1, }, { "arg": "--evaluation-strategy", "help": "Specify how often to evaluate the model performance. Options include 'no', 'steps', 'epoch'. 'epoch' evaluates at the end of each training epoch by default.", "required": False, "type": str, "default": "epoch", }, { "arg": "--save-total-limit", "help": "Limit the total number of model checkpoints to save. 
Helps manage disk space by retaining only the most recent checkpoints. Default is to save only the latest one.", "required": False, "type": int, "default": 1, }, { "arg": "--auto-find-batch-size", "help": "Enable automatic batch size determination based on your hardware capabilities. When set, it tries to find the largest batch size that fits in memory.", "required": False, "action": "store_true", }, { "arg": "--mixed-precision", "help": "Choose the precision mode for training to optimize performance and memory usage. Options are 'fp16', 'bf16', or None for default precision. Default is None.", "required": False, "type": str, "default": None, "choices": ["fp16", "bf16", None], }, { "arg": "--peft", "help": "Enable LoRA-PEFT", "required": False, "action": "store_true", }, { "arg": "--quantization", "help": "Select the quantization mode to reduce model size and potentially increase inference speed. Options include 'int8' for 8-bit integer quantization or None for no quantization. Default is None", "required": False, "type": str, "default": None, "choices": ["int8", None], }, { "arg": "--lora-r", "help": "Set the rank 'R' for the LoRA (Low-Rank Adaptation) technique. Default is 16.", "required": False, "type": int, "default": 16, }, { "arg": "--lora-alpha", "help": "Specify the 'Alpha' parameter for LoRA. Default is 32.", "required": False, "type": int, "default": 32, }, { "arg": "--lora-dropout", "help": "Determine the dropout rate to apply in the LoRA layers, which can help in preventing overfitting by randomly disabling a fraction of neurons during training. Default rate is 0.05.", "required": False, "type": float, "default": 0.05, }, { "arg": "--target-modules", "help": "List the modules within the model architecture that should be targeted for specific techniques such as LoRA adaptations. Useful for fine-tuning particular components of large models. 
By default all linear layers are targeted.", "required": False, "type": str, "default": "all-linear", }, ] arg_list = common_args() + arg_list run_seq2seq_parser = parser.add_parser("seq2seq", description="✨ Run AutoTrain Seq2Seq") for arg in arg_list: if "action" in arg: run_seq2seq_parser.add_argument( arg["arg"], help=arg["help"], required=arg.get("required", False), action=arg.get("action"), default=arg.get("default"), ) else: run_seq2seq_parser.add_argument( arg["arg"], help=arg["help"], required=arg.get("required", False), type=arg.get("type"), default=arg.get("default"), choices=arg.get("choices"), ) run_seq2seq_parser.set_defaults(func=run_seq2seq_command_factory) def __init__(self, args): self.args = args store_true_arg_names = ["train", "deploy", "inference", "auto_find_batch_size", "push_to_hub", "peft"] for arg_name in store_true_arg_names: if getattr(self.args, arg_name) is None: setattr(self.args, arg_name, False) if self.args.train: if self.args.project_name is None: raise ValueError("Project name must be specified") if self.args.data_path is None: raise ValueError("Data path must be specified") if self.args.model is None: raise ValueError("Model must be specified") if self.args.push_to_hub: if self.args.username is None: raise ValueError("Username must be specified for push to hub") else: raise ValueError("Must specify --train, --deploy or --inference") def run(self): logger.info("Running Seq2Seq Classification") if self.args.train: params = Seq2SeqParams(**vars(self.args)) params = seq2seq_munge_data(params, local=self.args.backend.startswith("local")) project = AutoTrainProject(params=params, backend=self.args.backend) job_id = project.create() logger.info(f"Job ID: {job_id}")
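The `run()` method above boils down to building a `Seq2SeqParams` object and handing it to `AutoTrainProject`. Here is a rough Python equivalent; it skips the `seq2seq_munge_data` step, so it assumes the data path already points at a dataset in the expected format, and the field values are only illustrative.

```python
# Rough Python equivalent of the CLI's run() method above (data munging skipped).
from autotrain.project import AutoTrainProject
from autotrain.trainers.seq2seq.params import Seq2SeqParams

params = Seq2SeqParams(
    model="google/flan-t5-base",
    project_name="my-seq2seq-run",
    data_path="data/",        # assumed to already be in the expected format
    text_column="text",
    target_column="target",
)

project = AutoTrainProject(params=params, backend="local")
job_id = project.create()
print(f"Job ID: {job_id}")
```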
autotrain-advanced/src/autotrain/cli/run_seq2seq.py/0
{ "file_path": "autotrain-advanced/src/autotrain/cli/run_seq2seq.py", "repo_id": "autotrain-advanced", "token_count": 4732 }
19
import io import json import os from dataclasses import dataclass from typing import Any, List from huggingface_hub import HfApi, create_repo from autotrain import logger @dataclass class DreamboothPreprocessor: concept_images: List[Any] concept_name: str username: str project_name: str token: str local: bool def __post_init__(self): self.repo_name = f"{self.username}/autotrain-data-{self.project_name}" if not self.local: try: create_repo( repo_id=self.repo_name, repo_type="dataset", token=self.token, private=True, exist_ok=False, ) except Exception: logger.error("Error creating repo") raise ValueError("Error creating repo") def _upload_concept_images(self, file, api): logger.info(f"Uploading {file} to concept1") if isinstance(file, str): path_in_repo = f"concept1/{file.split('/')[-1]}" else: path_in_repo = f"concept1/{file.filename.split('/')[-1]}" api.upload_file( path_or_fileobj=file if isinstance(file, str) else file.file.read(), path_in_repo=path_in_repo, repo_id=self.repo_name, repo_type="dataset", token=self.token, ) def _upload_concept_prompts(self, api): _prompts = {} _prompts["concept1"] = self.concept_name prompts = json.dumps(_prompts) prompts = prompts.encode("utf-8") prompts = io.BytesIO(prompts) api.upload_file( path_or_fileobj=prompts, path_in_repo="prompts.json", repo_id=self.repo_name, repo_type="dataset", token=self.token, ) def _save_concept_images(self, file): logger.info("Saving concept images") logger.info(file) if isinstance(file, str): _file = file path = f"{self.project_name}/autotrain-data/concept1/{_file.split('/')[-1]}" else: _file = file.file.read() path = f"{self.project_name}/autotrain-data/concept1/{file.filename.split('/')[-1]}" os.makedirs(os.path.dirname(path), exist_ok=True) # if file is a string, copy the file to the new location if isinstance(file, str): with open(_file, "rb") as f: with open(path, "wb") as f2: f2.write(f.read()) else: with open(path, "wb") as f: f.write(_file) def _save_concept_prompts(self): _prompts = {} _prompts["concept1"] = self.concept_name path = f"{self.project_name}/autotrain-data/prompts.json" with open(path, "w", encoding="utf-8") as f: json.dump(_prompts, f) def prepare(self): api = HfApi(token=self.token) for _file in self.concept_images: if self.local: self._save_concept_images(_file) else: self._upload_concept_images(_file, api) if self.local: self._save_concept_prompts() else: self._upload_concept_prompts(api) if self.local: return f"{self.project_name}/autotrain-data" return f"{self.username}/autotrain-data-{self.project_name}"
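A minimal local-mode usage sketch of the preprocessor above; the image paths, project name, and prompt are illustrative, and the token is only exercised when `local=False` (Hub upload).

```python
from autotrain.preprocessor.dreambooth import DreamboothPreprocessor

preprocessor = DreamboothPreprocessor(
    concept_images=["images/dog_01.jpg", "images/dog_02.jpg"],  # local file paths
    concept_name="photo of a sks dog",
    username="my-username",
    project_name="my-dreambooth-project",
    token="hf_XXX",  # placeholder; not used for purely local preparation
    local=True,
)

data_path = preprocessor.prepare()
print(data_path)  # my-dreambooth-project/autotrain-data
```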
autotrain-advanced/src/autotrain/preprocessor/dreambooth.py/0
{ "file_path": "autotrain-advanced/src/autotrain/preprocessor/dreambooth.py", "repo_id": "autotrain-advanced", "token_count": 1767 }
20
from functools import partial import torch from datasets import Dataset from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training from peft.tuners.lora import LoraLayer from transformers import ( AutoConfig, AutoModelForCausalLM, BitsAndBytesConfig, Trainer, TrainingArguments, default_data_collator, ) from transformers.trainer_callback import PrinterCallback from autotrain import logger from autotrain.trainers.clm import utils from autotrain.trainers.clm.params import LLMTrainingParams from autotrain.trainers.common import ALLOW_REMOTE_CODE def process_data(data, tokenizer, config): data = data.to_pandas() data = data.fillna("") data = data[[config.text_column]] if config.add_eos_token: data[config.text_column] = data[config.text_column] + tokenizer.eos_token data = Dataset.from_pandas(data) return data def train(config): logger.info("Starting default/generic CLM training...") if isinstance(config, dict): config = LLMTrainingParams(**config) train_data, valid_data = utils.process_input_data(config) tokenizer = utils.get_tokenizer(config) train_data, valid_data = utils.process_data_with_chat_template(config, tokenizer, train_data, valid_data) train_data = process_data( data=train_data, tokenizer=tokenizer, config=config, ) if config.valid_split is not None: valid_data = process_data( data=valid_data, tokenizer=tokenizer, config=config, ) logging_steps = utils.configure_logging_steps(config, train_data, valid_data) training_args = utils.configure_training_args(config, logging_steps) config = utils.configure_block_size(config, tokenizer) args = TrainingArguments(**training_args) logger.info("loading model config...") model_config = AutoConfig.from_pretrained( config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_cache=config.disable_gradient_checkpointing, ) logger.info("loading model...") if config.peft: if config.quantization == "int4": bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=False, ) elif config.quantization == "int8": bnb_config = BitsAndBytesConfig(load_in_8bit=True) else: bnb_config = None model = AutoModelForCausalLM.from_pretrained( config.model, config=model_config, token=config.token, quantization_config=bnb_config, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2, ) else: model = AutoModelForCausalLM.from_pretrained( config.model, config=model_config, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2, ) logger.info(f"model dtype: {model.dtype}") model.resize_token_embeddings(len(tokenizer)) if config.peft: logger.info("preparing peft model...") if config.quantization is not None: gradient_checkpointing_kwargs = {} if not config.disable_gradient_checkpointing: if config.quantization in ("int4", "int8"): gradient_checkpointing_kwargs = {"use_reentrant": True} else: gradient_checkpointing_kwargs = {"use_reentrant": False} model = prepare_model_for_kbit_training( model, use_gradient_checkpointing=not config.disable_gradient_checkpointing, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) else: model.enable_input_require_grads() peft_config = LoraConfig( r=config.lora_r, lora_alpha=config.lora_alpha, lora_dropout=config.lora_dropout, bias="none", task_type="CAUSAL_LM", target_modules=utils.get_target_modules(config), ) model = get_peft_model(model, peft_config) tokenize_fn = partial(utils.tokenize, tokenizer=tokenizer, config=config) 
group_texts_fn = partial(utils.group_texts, config=config) train_data = train_data.map( tokenize_fn, batched=True, num_proc=1, remove_columns=list(train_data.features), desc="Running tokenizer on train dataset", ) if config.valid_split is not None: valid_data = valid_data.map( tokenize_fn, batched=True, num_proc=1, remove_columns=list(valid_data.features), desc="Running tokenizer on validation dataset", ) train_data = train_data.map( group_texts_fn, batched=True, num_proc=4, desc=f"Grouping texts in chunks of {config.block_size}", ) if config.valid_split is not None: valid_data = valid_data.map( group_texts_fn, batched=True, num_proc=4, desc=f"Grouping texts in chunks of {config.block_size}", ) logger.info("creating trainer") callbacks = utils.get_callbacks(config) trainer_args = dict( args=args, model=model, callbacks=callbacks, ) trainer = Trainer( **trainer_args, train_dataset=train_data, eval_dataset=valid_data if config.valid_split is not None else None, tokenizer=tokenizer, data_collator=default_data_collator, ) for name, module in trainer.model.named_modules(): if isinstance(module, LoraLayer): if config.mixed_precision == "bf16": module = module.to(torch.bfloat16) if "norm" in name: module = module.to(torch.float32) if any(x in name for x in ["lm_head", "embed_tokens", "wte", "wpe"]): if hasattr(module, "weight"): if config.mixed_precision == "bf16" and module.weight.dtype == torch.float32: module = module.to(torch.bfloat16) trainer.remove_callback(PrinterCallback) trainer.train() utils.post_training_steps(config, trainer)
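Below is a hedged sketch of calling this trainer directly with a plain dict, which the first lines of `train()` convert into `LLMTrainingParams`. Most field names are taken from the attribute accesses in this file; `data_path` and `project_name` are assumptions, and the authoritative schema lives in `autotrain.trainers.clm.params.LLMTrainingParams`.

```python
# Hedged example config for train() above; check LLMTrainingParams for the required fields.
from autotrain.trainers.clm.train_clm_default import train

config = {
    "model": "gpt2",                  # any causal LM checkpoint
    "project_name": "my-llm-run",     # assumption: standard AutoTrain output field
    "data_path": "data/",             # assumption: where process_input_data() reads from
    "text_column": "text",
    "peft": True,
    "quantization": "int4",
    "lora_r": 16,
    "lora_alpha": 32,
    "lora_dropout": 0.05,
    "mixed_precision": "bf16",
}

train(config)  # converted internally via LLMTrainingParams(**config)
```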
autotrain-advanced/src/autotrain/trainers/clm/train_clm_default.py/0
{ "file_path": "autotrain-advanced/src/autotrain/trainers/clm/train_clm_default.py", "repo_id": "autotrain-advanced", "token_count": 2966 }
21
import argparse
import json

from autotrain import logger
from autotrain.trainers.common import monitor, pause_space
from autotrain.trainers.generic import utils
from autotrain.trainers.generic.params import GenericParams


def parse_args():
    # get training_config.json from the end user
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, required=True)
    return parser.parse_args()


@monitor
def run(config):
    if isinstance(config, dict):
        config = GenericParams(**config)

    # download the data repo
    logger.info("Downloading data repo...")
    utils.pull_dataset_repo(config)

    logger.info("Uninstalling requirements...")
    utils.uninstall_requirements(config)

    # install the requirements
    logger.info("Installing requirements...")
    utils.install_requirements(config)

    # run the command
    logger.info("Running command...")
    utils.run_command(config)

    pause_space(config)


if __name__ == "__main__":
    args = parse_args()
    with open(args.config, encoding="utf-8") as f:
        _config = json.load(f)
    _config = GenericParams(**_config)
    run(_config)
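The trainer above is meant to be launched as a module with a JSON config on disk. Here is a minimal, hedged sketch of both entry points; the exact fields accepted by `GenericParams` are defined in `autotrain.trainers.generic.params`, so the config contents are not spelled out here.

```python
# CLI entry point (runs the __main__ block above):
#   python -m autotrain.trainers.generic --config training_config.json
#
# Equivalent call from Python:
import json

from autotrain.trainers.generic.__main__ import run
from autotrain.trainers.generic.params import GenericParams

with open("training_config.json", encoding="utf-8") as f:
    run(GenericParams(**json.load(f)))
```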
autotrain-advanced/src/autotrain/trainers/generic/__main__.py/0
{ "file_path": "autotrain-advanced/src/autotrain/trainers/generic/__main__.py", "repo_id": "autotrain-advanced", "token_count": 376 }
22
from typing import Optional

from pydantic import Field

from autotrain.trainers.common import AutoTrainParams


class Seq2SeqParams(AutoTrainParams):
    data_path: str = Field(None, title="Data path")
    model: str = Field("google/flan-t5-base", title="Model name")
    username: Optional[str] = Field(None, title="Hugging Face Username")
    seed: int = Field(42, title="Seed")
    train_split: str = Field("train", title="Train split")
    valid_split: Optional[str] = Field(None, title="Validation split")
    project_name: str = Field("project-name", title="Output directory")
    token: Optional[str] = Field(None, title="Hub Token")
    push_to_hub: bool = Field(False, title="Push to hub")
    text_column: str = Field("text", title="Text column")
    target_column: str = Field("target", title="Target text column")
    lr: float = Field(5e-5, title="Learning rate")
    epochs: int = Field(3, title="Number of training epochs")
    max_seq_length: int = Field(128, title="Max sequence length")
    max_target_length: int = Field(128, title="Max target sequence length")
    batch_size: int = Field(2, title="Training batch size")
    warmup_ratio: float = Field(0.1, title="Warmup proportion")
    gradient_accumulation: int = Field(1, title="Gradient accumulation steps")
    optimizer: str = Field("adamw_torch", title="Optimizer")
    scheduler: str = Field("linear", title="Scheduler")
    weight_decay: float = Field(0.0, title="Weight decay")
    max_grad_norm: float = Field(1.0, title="Max gradient norm")
    logging_steps: int = Field(-1, title="Logging steps")
    evaluation_strategy: str = Field("epoch", title="Evaluation strategy")
    auto_find_batch_size: bool = Field(False, title="Auto find batch size")
    mixed_precision: Optional[str] = Field(None, title="fp16, bf16, or None")
    save_total_limit: int = Field(1, title="Save total limit")
    peft: bool = Field(False, title="Use PEFT")
    quantization: Optional[str] = Field("int4", title="int4, int8, or None")
    lora_r: int = Field(16, title="LoRA-R")
    lora_alpha: int = Field(32, title="LoRA-Alpha")
    lora_dropout: float = Field(0.05, title="LoRA-Dropout")
    target_modules: str = Field("all-linear", title="Target modules for PEFT")
    log: str = Field("none", title="Logging using experiment tracking")
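For reference, here is what instantiating these parameters looks like in practice; every field name comes from the class above, while the concrete values are just an illustrative configuration.

```python
from autotrain.trainers.seq2seq.params import Seq2SeqParams

params = Seq2SeqParams(
    data_path="data/",                # local folder or Hub dataset id
    model="google/flan-t5-base",
    project_name="news-summarizer",
    text_column="text",
    target_column="target",
    max_seq_length=256,
    max_target_length=64,
    lr=5e-5,
    epochs=3,
    batch_size=2,
    peft=True,                        # enables LoRA, so lora_r/lora_alpha/lora_dropout apply
    quantization="int8",
    mixed_precision="fp16",
)
print(params)
```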
autotrain-advanced/src/autotrain/trainers/seq2seq/params.py/0
{ "file_path": "autotrain-advanced/src/autotrain/trainers/seq2seq/params.py", "repo_id": "autotrain-advanced", "token_count": 839 }
23
---
title: "How 🤗 Accelerate runs very large models thanks to PyTorch"
thumbnail: /blog/assets/104_accelerate-large-models/thumbnail.png
authors:
- user: sgugger
---

# How 🤗 Accelerate runs very large models thanks to PyTorch

## Load and run large models

Meta AI and BigScience recently open-sourced very large language models which won't fit into memory (RAM or GPU) of most consumer hardware. At Hugging Face, part of our mission is to make even those large models accessible, so we developed tools to allow you to run those models even if you don't own a supercomputer. All the examples picked in this blog post run on a free Colab instance (with limited RAM and disk space). If you have access to more disk space, don't hesitate to pick larger checkpoints.

Here is how we can run OPT-6.7B:

```python
import torch
from transformers import pipeline

# This works on a base Colab instance.
# Pick a larger checkpoint if you have time to wait and enough disk space!
checkpoint = "facebook/opt-6.7b"
generator = pipeline("text-generation", model=checkpoint, device_map="auto", torch_dtype=torch.float16)

# Perform inference
generator("More and more large language models are opensourced so Hugging Face has")
```

We'll explain what each of those arguments does in a moment, but first just consider the traditional model loading pipeline in PyTorch: it usually consists of:

1. Create the model
2. Load in memory its weights (in an object usually called `state_dict`)
3. Load those weights in the created model
4. Move the model on the device for inference

While that has worked pretty well in the past years, very large models make this approach challenging. Here the model picked has 6.7 *billion* parameters. In the default precision, it means that just step 1 (creating the model) will take roughly **26.8GB** in RAM (1 parameter in float32 takes 4 bytes in memory). This can't even fit in the RAM you get on Colab.

Then step 2 will load in memory a second copy of the model (so another 26.8GB in RAM in default precision). If you were trying to load the largest models, for example BLOOM or OPT-176B (which both have 176 billion parameters), like this, you would need 1.4 **terabytes** of CPU RAM. That is a bit excessive! And all of this to just move the model on one (or several) GPU(s) at step 4.

Clearly we need something smarter. In this blog post, we'll explain how Accelerate leverages PyTorch features to load and run inference with very large models, even if they don't fit in RAM or one GPU. In a nutshell, it changes the process above like this:

1. Create an empty (i.e. without weights) model
2. Decide where each layer is going to go (when multiple devices are available)
3. Load in memory parts of its weights
4. Load those weights in the empty model
5. Move the weights on the device for inference
6. Repeat from step 3 for the next weights until all the weights are loaded

## Creating an empty model

PyTorch 1.9 introduced a new kind of device called the *meta* device. This allows us to create tensors without any data attached to them: a tensor on the meta device only needs a shape. As long as you are on the meta device, you can thus create arbitrarily large tensors without having to worry about CPU (or GPU) RAM.

For instance, the following code will crash on Colab:

```python
import torch

large_tensor = torch.randn(100000, 100000)
```

as this large tensor requires `4 * 10**10` bytes (the default precision is FP32, so each element of the tensor takes 4 bytes) thus 40GB of RAM.
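To make those numbers concrete, here is a quick back-of-the-envelope sketch (plain arithmetic, nothing is allocated) reproducing the two figures quoted above; the parameter count for OPT-6.7B is simply taken at face value from its name.

```python
# Back-of-the-envelope memory math for the figures quoted above.
bytes_per_fp32_value = 4

opt_6_7b_parameters = 6.7e9
print(f"OPT-6.7B in float32: {opt_6_7b_parameters * bytes_per_fp32_value / 1e9:.1f} GB")  # ~26.8 GB

large_tensor_elements = 100_000 * 100_000
print(f"100000 x 100000 tensor: {large_tensor_elements * bytes_per_fp32_value / 1e9:.0f} GB")  # 40 GB
```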
The same on the meta device works just fine however: ```python import torch large_tensor = torch.randn(100000, 100000, device="meta") ``` If you try to display this tensor, here is what PyTorch will print: ``` tensor(..., device='meta', size=(100000, 100000)) ``` As we said before, there is no data associated with this tensor, just a shape. You can instantiate a model directly on the meta device: ```python large_model = torch.nn.Linear(100000, 100000, device="meta") ``` But for an existing model, this syntax would require you to rewrite all your modeling code so that each submodule accepts and passes along a `device` keyword argument. Since this was impractical for the 150 models of the Transformers library, we developed a context manager that will instantiate an empty model for you. Here is how you can instantiate an empty version of BLOOM: ```python from accelerate import init_empty_weights from transformers import AutoConfig, AutoModelForCausalLM config = AutoConfig.from_pretrained("bigscience/bloom") with init_empty_weights(): model = AutoModelForCausalLM.from_config(config) ``` This works on any model, but you get back a shell you can't use directly: some operations are implemented for the meta device, but not all yet. Here for instance, you can use the `large_model` defined above with an input, but not the BLOOM model. Even when using it, the output will be a tensor of the meta device, so you will get the shape of the result, but nothing more. As further work on this, the PyTorch team is working on a new [class `FakeTensor`](https://pytorch.org/torchdistx/latest/fake_tensor.html), which is a bit like tensors on the meta device, but with the device information (on top of shape and dtype) Since we know the shape of each weight, we can however know how much memory they will all consume once we load the pretrained tensors fully. Therefore, we can make a decision on how to split our model across CPUs and GPUs. ## Computing a device map Before we start loading the pretrained weights, we will need to know where we want to put them. This way we can free the CPU RAM each time we have put a weight in its right place. This can be done with the empty model on the meta device, since we only need to know the shape of each tensor and its dtype to compute how much space it will take in memory. Accelerate provides a function to automatically determine a *device map* from an empty model. It will try to maximize the use of all available GPUs, then CPU RAM, and finally flag the weights that don't fit for disk offload. Let's have a look using [OPT-13b](https://huggingface.co/facebook/opt-13b). ```python from accelerate import infer_auto_device_map, init_empty_weights from transformers import AutoConfig, AutoModelForCausalLM config = AutoConfig.from_pretrained("facebook/opt-13b") with init_empty_weights(): model = AutoModelForCausalLM.from_config(config) device_map = infer_auto_device_map(model) ``` This will return a dictionary mapping modules or weights to a device. On a machine with one Titan RTX for instance, we get the following: ```python out {'model.decoder.embed_tokens': 0, 'model.decoder.embed_positions': 0, 'model.decoder.final_layer_norm': 0, 'model.decoder.layers.0': 0, 'model.decoder.layers.1': 0, ... 
'model.decoder.layers.9': 0, 'model.decoder.layers.10.self_attn': 0, 'model.decoder.layers.10.activation_fn': 0, 'model.decoder.layers.10.self_attn_layer_norm': 0, 'model.decoder.layers.10.fc1': 'cpu', 'model.decoder.layers.10.fc2': 'cpu', 'model.decoder.layers.10.final_layer_norm': 'cpu', 'model.decoder.layers.11': 'cpu', ... 'model.decoder.layers.17': 'cpu', 'model.decoder.layers.18.self_attn': 'cpu', 'model.decoder.layers.18.activation_fn': 'cpu', 'model.decoder.layers.18.self_attn_layer_norm': 'cpu', 'model.decoder.layers.18.fc1': 'disk', 'model.decoder.layers.18.fc2': 'disk', 'model.decoder.layers.18.final_layer_norm': 'disk', 'model.decoder.layers.19': 'disk', ... 'model.decoder.layers.39': 'disk', 'lm_head': 'disk'} ``` Accelerate evaluated that the embeddings and the decoder up until the 9th block could all fit on the GPU (device 0), then part of the 10th block needs to be on the CPU, as well as the following weights until the 17th layer. Then the 18th layer is split between the CPU and the disk and the following layers must all be offloaded to disk Actually using this device map later on won't work, because the layers composing this model have residual connections (where the input of the block is added to the output of the block) so all of a given layer should be on the same device. We can indicate this to Accelerate by passing a list of module names that shouldn't be split with the `no_split_module_classes` keyword argument: ```python device_map = infer_auto_device_map(model, no_split_module_classes=["OPTDecoderLayer"]) ``` This will then return ```python out 'model.decoder.embed_tokens': 0, 'model.decoder.embed_positions': 0, 'model.decoder.final_layer_norm': 0, 'model.decoder.layers.0': 0, 'model.decoder.layers.1': 0, ... 'model.decoder.layers.9': 0, 'model.decoder.layers.10': 'cpu', 'model.decoder.layers.11': 'cpu', ... 'model.decoder.layers.17': 'cpu', 'model.decoder.layers.18': 'disk', ... 'model.decoder.layers.39': 'disk', 'lm_head': 'disk'} ``` Now, each layer is always on the same device. In Transformers, when using `device_map` in the `from_pretrained()` method or in a `pipeline`, those classes of blocks to leave on the same device are automatically provided, so you don't need to worry about them. Note that you have the following options for `device_map` (only relevant when you have more than one GPU): - `"auto"` or `"balanced"`: Accelerate will split the weights so that each GPU is used equally; - `"balanced_low_0"`: Accelerate will split the weights so that each GPU is used equally except the first one, where it will try to have as little weights as possible (useful when you want to work with the outputs of the model on one GPU, for instance when using the `generate` function); - `"sequential"`: Accelerate will fill the GPUs in order (so the last ones might not be used at all). You can also pass your own `device_map` as long as it follows the format we saw before (dictionary layer/module names to device). Finally, note that the results of the `device_map` you receive depend on the selected dtype (as different types of floats take a different amount of space). 
Providing `dtype="float16"` will give us different results:

```python
device_map = infer_auto_device_map(model, no_split_module_classes=["OPTDecoderLayer"], dtype="float16")
```

In this precision, we can fit the model up to layer 21 on the GPU:

```python out
{'model.decoder.embed_tokens': 0,
 'model.decoder.embed_positions': 0,
 'model.decoder.final_layer_norm': 0,
 'model.decoder.layers.0': 0,
 'model.decoder.layers.1': 0,
 ...
 'model.decoder.layers.21': 0,
 'model.decoder.layers.22': 'cpu',
 ...
 'model.decoder.layers.37': 'cpu',
 'model.decoder.layers.38': 'disk',
 'model.decoder.layers.39': 'disk',
 'lm_head': 'disk'}
```

Now that we know where each weight is supposed to go, we can progressively load the pretrained weights inside the model.

## Sharding state dicts

Traditionally, PyTorch models are saved in a whole file containing a map from parameter name to weight. This map is often called a `state_dict`. Here is an excerpt from the [PyTorch documentation](https://pytorch.org/tutorials/beginner/basics/saveloadrun_tutorial.html) on saving and loading:

```python
# Save the model weights
torch.save(my_model.state_dict(), 'model_weights.pth')

# Reload them
new_model = ModelClass()
new_model.load_state_dict(torch.load('model_weights.pth'))
```

This works pretty well for models with less than 1 billion parameters, but for larger models, this is very taxing in RAM. The BLOOM model has 176 billion parameters; even with the weights saved in bfloat16 to save space, it still represents 352GB as a whole. While the super computer that trained this model might have this amount of memory available, requiring this for inference is unrealistic.

This is why large models on the Hugging Face Hub are not saved and shared with one big file containing all the weights, but **several** of them. If you go to the [BLOOM model page](https://huggingface.co/bigscience/bloom/tree/main) for instance, you will see there are 72 files named `pytorch_model_xxxxx-of-00072.bin`, which each contain part of the model weights. Using this format, we can load one part of the state dict in memory, put the weights inside the model, move them to the right device, then discard this state dict part before going to the next. Instead of requiring enough RAM to accommodate the whole model, we only need enough RAM to get the biggest checkpoint part, which we call a **shard**, so 7.19GB in the case of BLOOM.

We call the checkpoints saved in several files like BLOOM *sharded checkpoints*, and we have standardized their format as such:

- One file (called `pytorch_model.bin.index.json`) contains some metadata and a map from parameter name to file name, indicating where to find each weight
- All the other files are standard PyTorch state dicts; they just contain a part of the model instead of the whole one. You can have a look at the content of the index file [here](https://huggingface.co/bigscience/bloom/blob/main/pytorch_model.bin.index.json).

To load such a sharded checkpoint into a model, we just need to loop over the various shards.
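Here is a minimal sketch of that loop, assuming the sharded checkpoint has been cloned to a local folder and that `model` is a regular, already-instantiated model (loading into a meta-device shell additionally needs Accelerate's helpers to materialize the weights):

```python
# Hedged sketch of the shard-by-shard loading loop described above.
import json

import torch

checkpoint_dir = "bloom"  # hypothetical local clone of a sharded checkpoint

with open(f"{checkpoint_dir}/pytorch_model.bin.index.json", encoding="utf-8") as f:
    index = json.load(f)

# The index maps every parameter name to the shard file that contains it.
shard_files = sorted(set(index["weight_map"].values()))

for shard_file in shard_files:
    shard = torch.load(f"{checkpoint_dir}/{shard_file}", map_location="cpu")
    # strict=False because each shard only holds part of the weights.
    model.load_state_dict(shard, strict=False)
    del shard  # free the RAM used by this shard before loading the next one
```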
Accelerate provides a function called `load_checkpoint_in_model` that will do this for you if you have cloned one of the repos of the Hub, or you can directly use the `from_pretrained` method of Transformers, which will handle the downloading and caching for you:

```python
import torch
from transformers import AutoModelForCausalLM

# Will error
checkpoint = "facebook/opt-13b"
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.float16)
```

If the device map computed automatically requires some weights to be offloaded to disk because you don't have enough GPU and CPU RAM, you will get an error indicating you need to pass a folder where the weights offloaded to disk will be stored:

```python out
ValueError: The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder` for them.
```

Adding this argument should resolve the error:

```python
import torch
from transformers import AutoModelForCausalLM

# Will go out of RAM on Colab
checkpoint = "facebook/opt-13b"
model = AutoModelForCausalLM.from_pretrained(
    checkpoint, device_map="auto", offload_folder="offload", torch_dtype=torch.float16
)
```

Note that if you are trying to load a very large model that requires some disk offload on top of CPU offload, you might run out of RAM when the last shards of the checkpoint are loaded, since the part of the model staying on the CPU still takes up space. If that is the case, use the option `offload_state_dict=True` to temporarily offload the part of the model staying on CPU while the weights are all loaded, and reload it in RAM once all the weights have been processed:

```python
import torch
from transformers import AutoModelForCausalLM

checkpoint = "facebook/opt-13b"
model = AutoModelForCausalLM.from_pretrained(
    checkpoint, device_map="auto", offload_folder="offload", offload_state_dict=True, torch_dtype=torch.float16
)
```

This will fit in Colab, but will be so close to using all the RAM available that it will go out of RAM when you try to generate a prediction. To get a model we can use, we need to offload one more layer on the disk. We can do so by taking the `device_map` computed in the previous section, adapting it a bit, then passing it to the `from_pretrained` call:

```python
import torch
from transformers import AutoModelForCausalLM

checkpoint = "facebook/opt-13b"
device_map["model.decoder.layers.37"] = "disk"
model = AutoModelForCausalLM.from_pretrained(
    checkpoint, device_map=device_map, offload_folder="offload", offload_state_dict=True, torch_dtype=torch.float16
)
```

## Running a model split on several devices

One last part we haven't touched is how Accelerate enables your model to run with its weights spread across several GPUs, CPU RAM, and the disk folder. This is done very simply using hooks.

> [hooks](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.register_forward_hook) are a PyTorch API that adds functions executed just before each forward call

We couldn't use them directly since they only support models with regular arguments and no keyword arguments in their forward pass, but we took the same idea. Once the model is loaded, the `dispatch_model` function will add hooks to every module and submodule that are executed before and after each forward pass.
They will:
- make sure all the inputs of the module are on the same device as the weights;
- if the weights have been offloaded to the CPU, move them to GPU 0 before the forward pass and back to the CPU just after;
- if the weights have been offloaded to disk, load them into RAM and then onto GPU 0 before the forward pass, and free this memory just after.

The whole process is summarized in the following video:

<iframe width="560" height="315" src="https://www.youtube.com/embed/MWCSGj9jEAo" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

This way, your model can be loaded and run even if you don't have enough GPU RAM and CPU RAM. The only thing you need is disk space (and lots of patience!) While this solution is pretty naive if you have multiple GPUs (there is no clever pipeline parallelism involved, just using the GPUs sequentially), it still yields [pretty decent results for BLOOM](https://huggingface.co/blog/bloom-inference-pytorch-scripts). And it allows you to run the model on smaller setups (albeit more slowly).

To learn more about Accelerate big model inference, see the [documentation](https://huggingface.co/docs/accelerate/usage_guides/big_modeling).
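As a recap of the hook mechanism described above, here is a deliberately stripped-down sketch of the input-alignment part only; Accelerate's real hooks (attached by `dispatch_model`) additionally handle keyword arguments and the CPU/disk weight offloading.

```python
# Toy illustration of the pre-forward "align devices" idea described above.
import torch


def align_inputs_hook(module, inputs):
    # Move positional tensor inputs onto the same device as the module's weights.
    device = next(module.parameters()).device
    return tuple(t.to(device) if torch.is_tensor(t) else t for t in inputs)


layer = torch.nn.Linear(4, 4).to("cuda:0" if torch.cuda.is_available() else "cpu")
layer.register_forward_pre_hook(align_inputs_hook)

output = layer(torch.randn(2, 4))  # the input starts on CPU; the hook moves it if needed
```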
blog/accelerate-large-models.md/0
{ "file_path": "blog/accelerate-large-models.md", "repo_id": "blog", "token_count": 4961 }
24
--- title: "TTS Arena: Benchmarking Text-to-Speech Models in the Wild" thumbnail: /blog/assets/arenas-on-the-hub/thumbnail.png authors: - user: mrfakename guest: true - user: reach-vb - user: clefourrier - user: Wauplin - user: ylacombe - user: main-horse guest: true - user: sanchit-gandhi --- # TTS Arena: Benchmarking Text-to-Speech Models in the Wild Automated measurement of the quality of text-to-speech (TTS) models is very difficult. Assessing the naturalness and inflection of a voice is a trivial task for humans, but it is much more difficult for AI. This is why today, we’re thrilled to announce the TTS Arena. Inspired by [LMSys](https://lmsys.org/)'s [Chatbot Arena](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) for LLMs, we developed a tool that allows anyone to easily compare TTS models side-by-side. Just submit some text, listen to two different models speak it out, and vote on which model you think is the best. The results will be organized into a leaderboard that displays the community’s highest-rated models. <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/4.19.2/gradio.js"> </script> <gradio-app theme_mode="light" space="TTS-AGI/TTS-Arena"></gradio-app> ## Motivation The field of speech synthesis has long lacked an accurate method to measure the quality of different models. Objective metrics like WER (word error rate) are unreliable measures of model quality, and subjective measures such as MOS (mean opinion score) are typically small-scale experiments conducted with few listeners. As a result, these measurements are generally not useful for comparing two models of roughly similar quality. To address these drawbacks, we are inviting the community to rank models in an easy-to-use interface. By opening this tool and disseminating results to the public, we aim to democratize how models are ranked and to make model comparison and selection accessible to everyone. ## The TTS Arena Human ranking for AI systems is not a novel approach. Recently, LMSys applied this method in their [Chatbot Arena](https://arena.lmsys.org/) with great results, collecting over 300,000 rankings so far. Because of its success, we adopted a similar framework for our leaderboard, inviting any person to rank synthesized audio. The leaderboard allows a user to enter text, which will be synthesized by two models. After listening to each sample, the user will vote on which model sounds more natural. Due to the risks of human bias and abuse, model names will be revealed only after a vote is submitted. ## Selected Models We selected several SOTA (State of the Art) models for our leaderboard. While most are open-source models, we also included several proprietary models to allow developers to compare the state of open-source development with proprietary models. The models available at launch are: - ElevenLabs (proprietary) - MetaVoice - OpenVoice - Pheme - WhisperSpeech - XTTS Although there are many other open and closed source models available, we chose these because they are generally accepted as the highest-quality publicly available models. ## The TTS Leaderboard The results from Arena voting will be made publicly available in a dedicated leaderboard. Note that it will be initially empty until sufficient votes are accumulated, then models will gradually appear. As raters submit new votes, the leaderboard will automatically update. 
Similar to the Chatbot Arena, models will be ranked using an algorithm similar to the [Elo rating system](https://en.wikipedia.org/wiki/Elo_rating_system), commonly used in chess and other games. ## Conclusion We hope the [TTS Arena](https://huggingface.co/spaces/TTS-AGI/TTS-Arena) proves to be a helpful resource for all developers. We'd love to hear your feedback! Please do not hesitate to let us know if you have any questions or suggestions by sending us an [X/Twitter DM](https://twitter.com/realmrfakename), or by opening a discussion in [the community tab of the Space](https://huggingface.co/spaces/TTS-AGI/TTS-Arena/discussions). ## Credits Special thanks to all the people who helped make this possible, including [Clémentine Fourrier](https://twitter.com/clefourrier), [Lucian Pouget](https://twitter.com/wauplin), [Yoach Lacombe](https://twitter.com/yoachlacombe), [Main Horse](https://twitter.com/main_horse), and the Hugging Face team. In particular, I’d like to thank [VB](https://twitter.com/reach_vb) for his time and technical assistance. I’d also like to thank [Sanchit Gandhi](https://twitter.com/sanchitgandhi99) and [Apolinário Passos](https://twitter.com/multimodalart) for their feedback and support during the development process.
blog/arena-tts.md/0
{ "file_path": "blog/arena-tts.md", "repo_id": "blog", "token_count": 1235 }
25
# Some Notes on Pros of Open Science and Open Source - **Pooling Resources**: Building off of one another’s strengths; learning from one another’s failures. - **Accessibility**: Anyone can use the models, regardless of budget or affiliation. - This also helps to ensure diversity of contributors. - **Lowering Barriers**: You don’t need to have a tech job to explore how AI works. - **Innovation**: High-value applications are possible for more people to discover and create. - Relatedly, advancements in **addressing bias/harms** become more possible. - **Economic Opportunity**: More access leads to more businesses and jobs. - **Transparency**: Users and those affected have full visibility on the model and the training data. They can better identify potential biases or errors. - **Accountability**: Provenance to trace who-did-what; independent auditing possible. - **Privacy**: Users don't have to send their data to black box APIs. - **IP protection**: Users train their models on their data, and own them. - **Freedom of choice**: Users are not locked in. They can switch models anytime. - **IT flexibility**: Users can train and deploy models anywhere they like. - **Tailored use**: Users can train/fine-tune for their specific needs. - **Safety**: More mechanisms available. - **Speed**: Good ideas can quickly flourish and be built on. Security issues can be quickly addressed. - **Diversity** of options. # Cons of Closed Source - **Centralization** of power. - **Opacity** of subtle bias/harm issues. - Hiding **illegal** or problematic data. - **Bare minimum of legal compliance** as opposed to good practices. - Fostering **misunderstanding for hype and profit**. - **Insularity of thinking** creates "groupthink" technology issues (such as harming people with marginalized characteristics). - **Security issues** not addressed quickly. - Consumer apps **can’t be flexible** and become dependent on a single model: Consumer apps built on top of closed source must “lock-in” their code based on what an API outputs; as closed source internal models are updated or changed, this can completely break the consumer’s system, or the consumer’s expectations of behavior. # Common Misunderstandings ## There’s an idea that open source is “less secure”. - Misses that closed software has just as dire (or more so) security concerns as open source. - Misses the fact that the diversity of options available with open source limits how many people will be affected by a malicious actor. ## There’s an idea that open source will help China to “beat us”. - Misses that part of why U.S. technology has flourished due to open science/open source. - Misses that U.S. dominance is a function of how friendly the U.S. is to companies: There is more to success than the code itself, the socioeconomic variables that the U.S. provides is particularly well-placed to help open companies flourish.
blog/assets/164_ethics-soc-5/why_open.md/0
{ "file_path": "blog/assets/164_ethics-soc-5/why_open.md", "repo_id": "blog", "token_count": 706 }
26